From: Thomas Walker Lynch Date: Thu, 5 Dec 2024 09:41:47 +0000 (+0000) Subject: check point, updating Mosaic with pencil dirs and jdk-23 X-Git-Url: https://git.reasoningtechnology.com/style/rt_dark_doc.css?a=commitdiff_plain;h=43611a5ca993d69a3a3975e3cf6fee11d9a17eec;p=Mosaic check point, updating Mosaic with pencil dirs and jdk-23 --- diff --git a/developer/bash/.githolder b/developer/bash/.githolder new file mode 100644 index 0000000..e69de29 diff --git a/developer/bash/Mosaic b/developer/bash/Mosaic new file mode 100755 index 0000000..ba5b241 --- /dev/null +++ b/developer/bash/Mosaic @@ -0,0 +1,2 @@ +#!/bin/bash +java com.ReasoningTechnology."Mosaic".Mosaic diff --git a/developer/document/build_transcript_v1.0.txt b/developer/document/build_transcript_v1.0.txt deleted file mode 100644 index 29aa92c..0000000 --- a/developer/document/build_transcript_v1.0.txt +++ /dev/null @@ -1,58 +0,0 @@ -> cd Mosaic -> source env_developer -> emacs & - -... - -2024-11-04T11:19:53Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> clean_build_directories -+ cd /var/user_data/Thomas-developer/Mosaic/developer -+ rm -r scratchpad/com -+ rm jvm/Mosaic.jar -+ rm shell/Mosaic -+ set +x -clean_build_directories done. - -2024-11-04T11:20:14Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> wipe_release -+ cd /var/user_data/Thomas-developer/Mosaic -+ rm -rf release/Mosaic release/Mosaic.jar -+ set +x -wipe_release done. - -2024-11-04T11:20:18Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> make -Compiling files... -+ javac -g -d scratchpad javac/IO.java javac/Mosaic.java javac/TestBench.java javac/Util.java -+ set +x -Creating JAR file... -+ jar_file=jvm/Mosaic.jar -+ mkdir -p jvm -+ jar cf jvm/Mosaic.jar -C scratchpad . -+ set +x -JAR file created successfully: jvm/Mosaic.jar -Creating shell wrappers... -developer/tool/make done. - -2024-11-04T11:20:40Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> release -Starting release process... -Installed Mosaic.jar to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r -Installed Mosaic to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r+x -developer/tool/release done. - -2024-11-04T11:20:44Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> clean_make_output -+ cd /var/user_data/Thomas-developer/Mosaic/developer -+ rm -r scratchpad/com/ReasoningTechnology/Mosaic -+ rm jvm/Mosaic.jar -+ rm 'shell/{Mosaic}' -rm: cannot remove 'shell/{Mosaic}': No such file or directory -+ set +x -clean_make_output done. - diff --git a/developer/document/build_transcript_v1.1.txt b/developer/document/build_transcript_v1.1.txt deleted file mode 100644 index 0a00aba..0000000 --- a/developer/document/build_transcript_v1.1.txt +++ /dev/null @@ -1,63 +0,0 @@ - ---- setting up the environment: - - -024-11-08T07:40:57Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer§ -> bash - -2024-11-08T07:41:19Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer§ -> cd Mosaic - -2024-11-08T07:41:25Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ -> . 
env_developer -REPO_HOME /var/user_data/Thomas-developer/Mosaic -PROJECT Mosaic -ENV tool_shared/bespoke/env -ENV developer/tool/env - -2024-11-08T07:41:34Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> emacs & - - ---- building the release candidate - -2024-11-08T09:58:08Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> clean_build_directories -+ cd /var/user_data/Thomas-developer/Mosaic/developer -+ rm -r scratchpad/com -+ rm jvm/Mosaic.jar -+ rm shell/Mosaic -+ set +x -clean_build_directories done. - -2024-11-08T09:58:16Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> make -Compiling files... -+ javac -g -d scratchpad javac/Mosaic_IO.java javac/Mosaic_Mosaic.java javac/Mosaic_Testbench.java javac/Mosaic_Util.java -+ set +x -Creating JAR file... -+ jar_file=jvm/Mosaic.jar -+ mkdir -p jvm -+ jar cf jvm/Mosaic.jar -C scratchpad . -+ set +x -JAR file created successfully: jvm/Mosaic.jar -Creating shell wrappers... -developer/tool/make done. - -2024-11-08T09:58:21Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> release -Starting release process... -Installed Mosaic.jar to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r -Installed Mosaic to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r+x -developer/tool/release done. - -2024-11-08T09:58:24Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> diff --git a/developer/document/the_build_environmet.txt b/developer/document/the_build_environmet.txt deleted file mode 100644 index cd491db..0000000 --- a/developer/document/the_build_environmet.txt +++ /dev/null @@ -1,84 +0,0 @@ -1. Tool - -The directory called `tool` has tools for the developer. There are comments at -the top of each that says what it does. - -In the tool directory, `env` sets the PATH, CLASSPATH, and prepares the developer's -environment. Noting will work right until this is sourced. (This is similar to -Python's `venv`.) - -The tool called `make` builds the project. This is not the venerable `/bin/make` -but is a simple bash script. It is going to compile everything in the `javac` -directory. - -The tool called `shall_wrapper_list` gives a list of classes names that are to -be given direct call shell wrappers. `make` will put these in the `shell` -directory. - -The `clean_` scripts are there to delete files so that developers do not have -to type `rm` commands. This helps prevent accidents. Note the -$REPO_HOME/tool_shared/bespoke/wipe_release script will remove files from the -../release directory. - -2. build - -`make` runs `javac` which puts the class files into the `scratch_pad` directory. -It will `makedir` a directory hierarchy in `scratch_pad` that mirrors the -package name. - -After compiling `make` then gathers the class files found in the scratchpad -directory hierarchy and puts them into a `.jar` file. Said `.jar` file will -be located in the directory `jvm`. - -The `scratch_pad` directory is not pushed to the repo. It can be cleaned -at any time, because it can always be rebuilt. - -3. release - -The `release` script will make a copy of the scripts in `shell` and the `.jar` -file in `jvm` and put them in the `$REPO_HOME/release` directory. This -comprises the release candidate. After a release branch is made, this becomes -the actual release. 
Note the script in `$REPO_HOME/bespoke/version` which -outputs the version for released code. - - -4. debug - -If you use emacs note the file `$REPO_HOME/test_shared/bespoke/emacs.el'. - -Edit `make` to add or remove the `-g` flag from `javac`. This controls putting -source code information into the class files. - -After `javac` is compiled with the `-g` flag, and in the `jdb` debugger, `jdb` -will look into the `scratchpad` directory hierarchy where the sources were -put to find the sources files to display when single stepping etc. - -The `distribute_source` tool adds links into the `scratchpad` directory hierarchy -the point back into the `javac` directory. After these links are made, `jdb` -will show the sources, and should the sources be edited, the originals located -in the `javac` directory will be modified. - -5. debug from the `tester` environment - -The tester environment points at the release candidate located in the -$REPO_HOME/release directory to find the java classes. - -If this release candidate was compiled with the `-g` flag, then it will have -embedded in it source information pointing back into the -`$REPO_HOME/developer/scratchpad` directory. - -If the `distribute_source` was not called by the developer, or the scratchpad -contents have been cleaned, jdb will not be able to find the sources. -If jdb does find the sources, and the tester edits them, then the originals -in the `$REPO_HOME/developer/javac` directory will be modified. If this -behavior is not desired, then put the tester on a `core_tester_branch`, then -inspect changes before merging them back to the `core_developer_branch`. - -This setup makes it possible for developers to use the tester environment -to work, without having to be on a separate branch, or for testers to -work separately. - - - - - diff --git "a/developer/document\360\237\226\211/build_transcript_v1.0.txt" "b/developer/document\360\237\226\211/build_transcript_v1.0.txt" new file mode 100644 index 0000000..29aa92c --- /dev/null +++ "b/developer/document\360\237\226\211/build_transcript_v1.0.txt" @@ -0,0 +1,58 @@ +> cd Mosaic +> source env_developer +> emacs & + +... + +2024-11-04T11:19:53Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> clean_build_directories ++ cd /var/user_data/Thomas-developer/Mosaic/developer ++ rm -r scratchpad/com ++ rm jvm/Mosaic.jar ++ rm shell/Mosaic ++ set +x +clean_build_directories done. + +2024-11-04T11:20:14Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> wipe_release ++ cd /var/user_data/Thomas-developer/Mosaic ++ rm -rf release/Mosaic release/Mosaic.jar ++ set +x +wipe_release done. + +2024-11-04T11:20:18Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> make +Compiling files... ++ javac -g -d scratchpad javac/IO.java javac/Mosaic.java javac/TestBench.java javac/Util.java ++ set +x +Creating JAR file... ++ jar_file=jvm/Mosaic.jar ++ mkdir -p jvm ++ jar cf jvm/Mosaic.jar -C scratchpad . ++ set +x +JAR file created successfully: jvm/Mosaic.jar +Creating shell wrappers... +developer/tool/make done. + +2024-11-04T11:20:40Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> release +Starting release process... +Installed Mosaic.jar to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r +Installed Mosaic to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r+x +developer/tool/release done. 
+ +2024-11-04T11:20:44Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> clean_make_output ++ cd /var/user_data/Thomas-developer/Mosaic/developer ++ rm -r scratchpad/com/ReasoningTechnology/Mosaic ++ rm jvm/Mosaic.jar ++ rm 'shell/{Mosaic}' +rm: cannot remove 'shell/{Mosaic}': No such file or directory ++ set +x +clean_make_output done. + diff --git "a/developer/document\360\237\226\211/build_transcript_v1.1.txt" "b/developer/document\360\237\226\211/build_transcript_v1.1.txt" new file mode 100644 index 0000000..0a00aba --- /dev/null +++ "b/developer/document\360\237\226\211/build_transcript_v1.1.txt" @@ -0,0 +1,63 @@ + +--- setting up the environment: + + +024-11-08T07:40:57Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer§ +> bash + +2024-11-08T07:41:19Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer§ +> cd Mosaic + +2024-11-08T07:41:25Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ +> . env_developer +REPO_HOME /var/user_data/Thomas-developer/Mosaic +PROJECT Mosaic +ENV tool_shared/bespoke/env +ENV developer/tool/env + +2024-11-08T07:41:34Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> emacs & + + +--- building the release candidate + +2024-11-08T09:58:08Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> clean_build_directories ++ cd /var/user_data/Thomas-developer/Mosaic/developer ++ rm -r scratchpad/com ++ rm jvm/Mosaic.jar ++ rm shell/Mosaic ++ set +x +clean_build_directories done. + +2024-11-08T09:58:16Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> make +Compiling files... ++ javac -g -d scratchpad javac/Mosaic_IO.java javac/Mosaic_Mosaic.java javac/Mosaic_Testbench.java javac/Mosaic_Util.java ++ set +x +Creating JAR file... ++ jar_file=jvm/Mosaic.jar ++ mkdir -p jvm ++ jar cf jvm/Mosaic.jar -C scratchpad . ++ set +x +JAR file created successfully: jvm/Mosaic.jar +Creating shell wrappers... +developer/tool/make done. + +2024-11-08T09:58:21Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> release +Starting release process... +Installed Mosaic.jar to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r +Installed Mosaic to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r+x +developer/tool/release done. + +2024-11-08T09:58:24Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> diff --git "a/developer/document\360\237\226\211/the_build_environmet.txt" "b/developer/document\360\237\226\211/the_build_environmet.txt" new file mode 100644 index 0000000..cd491db --- /dev/null +++ "b/developer/document\360\237\226\211/the_build_environmet.txt" @@ -0,0 +1,84 @@ +1. Tool + +The directory called `tool` has tools for the developer. There are comments at +the top of each that says what it does. + +In the tool directory, `env` sets the PATH, CLASSPATH, and prepares the developer's +environment. Noting will work right until this is sourced. (This is similar to +Python's `venv`.) + +The tool called `make` builds the project. This is not the venerable `/bin/make` +but is a simple bash script. It is going to compile everything in the `javac` +directory. + +The tool called `shall_wrapper_list` gives a list of classes names that are to +be given direct call shell wrappers. `make` will put these in the `shell` +directory. 
+ +The `clean_` scripts are there to delete files so that developers do not have +to type `rm` commands. This helps prevent accidents. Note the +$REPO_HOME/tool_shared/bespoke/wipe_release script will remove files from the +../release directory. + +2. build + +`make` runs `javac` which puts the class files into the `scratch_pad` directory. +It will `makedir` a directory hierarchy in `scratch_pad` that mirrors the +package name. + +After compiling `make` then gathers the class files found in the scratchpad +directory hierarchy and puts them into a `.jar` file. Said `.jar` file will +be located in the directory `jvm`. + +The `scratch_pad` directory is not pushed to the repo. It can be cleaned +at any time, because it can always be rebuilt. + +3. release + +The `release` script will make a copy of the scripts in `shell` and the `.jar` +file in `jvm` and put them in the `$REPO_HOME/release` directory. This +comprises the release candidate. After a release branch is made, this becomes +the actual release. Note the script in `$REPO_HOME/bespoke/version` which +outputs the version for released code. + + +4. debug + +If you use emacs note the file `$REPO_HOME/test_shared/bespoke/emacs.el'. + +Edit `make` to add or remove the `-g` flag from `javac`. This controls putting +source code information into the class files. + +After `javac` is compiled with the `-g` flag, and in the `jdb` debugger, `jdb` +will look into the `scratchpad` directory hierarchy where the sources were +put to find the sources files to display when single stepping etc. + +The `distribute_source` tool adds links into the `scratchpad` directory hierarchy +the point back into the `javac` directory. After these links are made, `jdb` +will show the sources, and should the sources be edited, the originals located +in the `javac` directory will be modified. + +5. debug from the `tester` environment + +The tester environment points at the release candidate located in the +$REPO_HOME/release directory to find the java classes. + +If this release candidate was compiled with the `-g` flag, then it will have +embedded in it source information pointing back into the +`$REPO_HOME/developer/scratchpad` directory. + +If the `distribute_source` was not called by the developer, or the scratchpad +contents have been cleaned, jdb will not be able to find the sources. +If jdb does find the sources, and the tester edits them, then the originals +in the `$REPO_HOME/developer/javac` directory will be modified. If this +behavior is not desired, then put the tester on a `core_tester_branch`, then +inspect changes before merging them back to the `core_developer_branch`. + +This setup makes it possible for developers to use the tester environment +to work, without having to be on a separate branch, or for testers to +work separately. + + + + + diff --git a/developer/javac/Mosaic_IO.java b/developer/javac/Mosaic_IO.java deleted file mode 100644 index fe6bdff..0000000 --- a/developer/javac/Mosaic_IO.java +++ /dev/null @@ -1,144 +0,0 @@ -package com.ReasoningTechnology.Mosaic; -/* - The primary purpose of this class is to redirect I/O to buffers, - sot that a test can check the I/O behavior of a function under test. 
-*/ - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.FileOutputStream; -import java.io.FileInputStream; -import java.io.FileDescriptor; -import java.io.PrintStream; -import java.io.InputStream; - -public class Mosaic_IO{ - - private PrintStream original_out; - private PrintStream original_err; - private InputStream original_in; - - private ByteArrayOutputStream out_content; - private ByteArrayOutputStream err_content; - private ByteArrayInputStream in_content; - private Boolean streams_foobar = false; - private Boolean uninitialized = true; - - - // IO currently has no constructors defined, uses default - - - // Redirects IO streams, logs and handles errors if redirection fails. - // - // Most tests do not do I/O checks, so rather than throwing an error - // it will set the streams_foobar flag, then throw an error if the I/O - // functions are used. - // - // This is the only method that can set the streams_foobar flag. - public Boolean redirect(){ - - try{ - original_out = System.out; - original_err = System.err; - original_in = System.in; - - out_content = new ByteArrayOutputStream(); - err_content = new ByteArrayOutputStream(); - in_content = new ByteArrayInputStream(new byte[0]); - - System.setOut( new PrintStream(out_content) ); - System.setErr( new PrintStream(err_content) ); - System.setIn(in_content); - - uninitialized = false; - return true; - - } catch(Exception e){ - restore_hard(); - streams_foobar = true; - return false; - - } - } - - // Hard restore of the streams, resetting to system defaults - public void restore_hard(){ - System.setOut(new PrintStream( new FileOutputStream(FileDescriptor.out)) ); - System.setErr(new PrintStream( new FileOutputStream(FileDescriptor.err))) ; - System.setIn(new FileInputStream(FileDescriptor.in)); - } - - // Restores original IO streams, ensuring foobar and uninitialized states are checked. - // If anything goes wrong reverse to restore_hard. - public void restore(){ - if(uninitialized || streams_foobar){ - restore_hard(); - return; - } - try{ - System.setOut(original_out); - System.setErr(original_err); - System.setIn(original_in); - } catch(Throwable e){ - restore_hard(); - } - } - - // Clears output, error, and input buffers, checks for foobar state only. - public void clear_buffers(){ - if(streams_foobar){ - throw new IllegalStateException("Cannot clear buffers: IO object is in foobar state."); - } - out_content.reset(); - err_content.reset(); - in_content = new ByteArrayInputStream( new byte[0] ); // Reset to EOF - System.setIn(in_content); - } - - public Boolean has_out_content(){ - if(streams_foobar){ - throw new IllegalStateException - ( - "Cannot access stdout content: IO object is in foobar state." - ); - } - return out_content.size() > 0; - } - public String get_out_content(){ - if(streams_foobar){ - throw new IllegalStateException - ( - "Cannot access stdout content: IO object is in foobar state." - ); - } - return out_content.toString(); - } - - public Boolean has_err_content(){ - if(streams_foobar){ - throw new IllegalStateException - ( - "Cannot access stderr content: IO object is in foobar state." - ); - } - return err_content.size() > 0; - } - public String get_err_content(){ - if(streams_foobar){ - throw new IllegalStateException - ( - "Cannot access stderr content: IO object is in foobar state." - ); - } - return err_content.toString(); - } - - // Pushes input string onto stdin, checks foobar state only. 
- public void push_input(String input_data){ - if(streams_foobar){ - throw new IllegalStateException("Cannot push input: IO object is in foobar state."); - } - in_content = new ByteArrayInputStream( input_data.getBytes() ); - System.setIn(in_content); - } -} diff --git a/developer/javac/Mosaic_Mosaic.java b/developer/javac/Mosaic_Mosaic.java deleted file mode 100644 index 51e57d0..0000000 --- a/developer/javac/Mosaic_Mosaic.java +++ /dev/null @@ -1,27 +0,0 @@ -package com.ReasoningTechnology.Mosaic; - -/* -The Mosaic shell callable wrapper is currently a placeholder. Perhaps someday we -can find something for this to do. - -*/ - - -public class Mosaic_Mosaic{ - - public static Boolean test_is_true(){ - return true; - } - - public static int run(){ - System.out.println("Main function placeholder. Currently Mosaic is used by extending the TestBench class."); - return 0; - } - - public static void main(String[] args){ - int return_code = run(); - System.exit(return_code); - return; - } - -} diff --git a/developer/javac/Mosaic_Testbench.java b/developer/javac/Mosaic_Testbench.java deleted file mode 100644 index c8e0644..0000000 --- a/developer/javac/Mosaic_Testbench.java +++ /dev/null @@ -1,106 +0,0 @@ -package com.ReasoningTechnology.Mosaic; - -import java.lang.reflect.Method; - -public class Mosaic_Testbench { - - /* -------------------------------------------------------------------------------- - Validate the structure of a test method - */ - public static Boolean method_is_wellformed(Method method){ - // Check if the method returns Boolean - if(!method.getReturnType().equals(Boolean.class)){ - System.out.println("Structural problem: " + method.getName() + " does not return Boolean."); - return false; - } - - // Check if the method has exactly one argument of type Mosaic_IO - Class[] parameterTypes = method.getParameterTypes(); - if(parameterTypes == null || parameterTypes.length != 1 || !parameterTypes[0].equals(Mosaic_IO.class)){ - System.out.println("Structural problem: " + method.getName() + " does not accept a single Mosaic_IO argument."); - return false; - } - - return true; - } - - /* -------------------------------------------------------------------------------- - Run a single test method - */ - public static Boolean run_test(Object test_suite, Method method, Mosaic_IO io){ - String test_name = method.getName(); - - // Tracking possible test failures - Boolean fail_malformed = false; - Boolean fail_reported = false; - Boolean fail_exception = false; - Boolean fail_extraneous_stdout = false; - Boolean fail_extraneous_stderr = false; - String exception_string = ""; - - // Validate method structure - if(!method_is_wellformed(method)){ - System.out.println("Error: " + test_name + " has an invalid structure."); - return false; - } - - // Redirect I/O - Boolean successful_redirect = io.redirect(); - if(successful_redirect){ - io.clear_buffers(); // Start each test with empty buffers - } else { - Mosaic_Util.log_message(test_name, "Error: I/O redirection failed before running the test."); - System.out.println("Warning: Failed to redirect I/O for test: " + test_name); - } - - // Run the test and catch any exceptions - try{ - Object result = method.invoke(test_suite, io); - fail_reported = !Boolean.TRUE.equals(result); // Test passes only if it returns exactly `true` - fail_extraneous_stdout = io.has_out_content(); - fail_extraneous_stderr = io.has_err_content(); - } catch(Exception e){ - fail_exception = true; - exception_string = e.toString(); - } finally{ - io.restore(); - } - - // 
Report results - if(fail_reported) System.out.println("Test failed: '" + test_name + "' reported failure."); - if(fail_exception) System.out.println("Test failed: '" + test_name + "' threw an exception: " + exception_string); - if(fail_extraneous_stdout){ - System.out.println("Test failed: '" + test_name + "' produced extraneous stdout."); - Mosaic_Util.log_output(test_name, "stdout", io.get_out_content()); - } - if(fail_extraneous_stderr){ - System.out.println("Test failed: '" + test_name + "' produced extraneous stderr."); - Mosaic_Util.log_output(test_name, "stderr", io.get_err_content()); - } - - // Determine final test result - return !(fail_reported || fail_exception || fail_extraneous_stdout || fail_extraneous_stderr); - } - - /* -------------------------------------------------------------------------------- - Run all tests in the test suite - */ - public static int run(Object test_suite){ - int failed_tests = 0; - int passed_tests = 0; - Method[] methods = test_suite.getClass().getDeclaredMethods(); - Mosaic_IO io = new Mosaic_IO(); - - for(Method method : methods){ - if(run_test(test_suite, method, io)) passed_tests++; else failed_tests++; - } - - // Summary of test results - System.out.println("Total tests run: " + (passed_tests + failed_tests)); - System.out.println("Total tests passed: " + passed_tests); - System.out.println("Total tests failed: " + failed_tests); - - return (failed_tests > 0) ? 1 : 0; - } - -} diff --git a/developer/javac/Mosaic_Util.java b/developer/javac/Mosaic_Util.java deleted file mode 100644 index bb6474f..0000000 --- a/developer/javac/Mosaic_Util.java +++ /dev/null @@ -1,126 +0,0 @@ -package com.ReasoningTechnology.Mosaic; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.FileWriter; -import java.io.IOException; -import java.io.PrintStream; -import java.lang.reflect.Constructor; -import java.lang.reflect.Method; -import java.lang.reflect.Modifier; -import java.lang.reflect.Proxy; -import java.time.Instant; -import java.time.ZoneOffset; -import java.time.format.DateTimeFormatter; -import java.util.function.Predicate; - - -public class Mosaic_Util{ - - // Linear search with a predicate - public static T find( T[] elements ,Predicate predicate ){ - for( T element : elements ){ - if( predicate.test( element )) return element; // Return the first match - } - return null; // Return null if no element satisfies the predicate - } - - // True when it does a search and finds a true value; otherwise false. - public static Boolean exists( Object[] elements ){ - return elements.length > 0 && find( elements ,element -> (element instanceof Boolean) && (Boolean) element ) != null; - } - - // True when it does a search and does not find a false value; otherwise false. 
- public static Boolean all( Object[] elements ){ - return elements.length > 0 && find( elements ,element -> !(element instanceof Boolean) || !(Boolean) element ) == null; - } - - public static void all_set_false( Boolean[] condition_list ){ - int i = 0; - while(i < condition_list.length){ - condition_list[i] = false; - i++; - } - } - - public static void all_set_true( Boolean[] condition_list ){ - int i = 0; - while(i < condition_list.length){ - condition_list[i] = true; - i++; - } - } - - public static String iso_utc_time(){ - return Instant.now().atOffset(ZoneOffset.UTC).format(DateTimeFormatter.ISO_INSTANT); - } - - // used to report if a test completed with data still on an output streams - public static void log_output(String test_name ,String stream ,String output_data){ - try(FileWriter log_writer = new FileWriter("test_log.txt" ,true)){ // Append mode - log_writer.write("\n" + iso_utc_time() + " -----------------------------------------------------------\n"); - log_writer.write("Test: " + test_name + "\n"); - log_writer.write("Stream: " + stream + "\n"); - log_writer.write("Output:\n" + output_data + "\n"); - } catch(IOException e) { - System.err.println("Error writing to log for test: " + test_name + ", stream: " + stream); - e.printStackTrace(System.err); - } - } - - // used to log a general message about a test - public static void log_message(String test_name ,String message){ - try(FileWriter log_writer = new FileWriter("test_log.txt" ,true)){ // Append mode - log_writer.write("\n" + iso_utc_time() + " -----------------------------------------------------------\n"); - log_writer.write("Test: " + test_name + "\n"); - log_writer.write("Message:\n" + message + "\n"); - } catch(IOException e){ - System.err.println - ( - "Error writing message \"" - + message - + "\" to log for test \'" - + test_name - + "\'" - ); - e.printStackTrace(System.err); - } - } - - public static Object make_all_public_methods_proxy( Class class_metadata ) { - try { - // Check if the class is public - int modifiers = class_metadata.getModifiers(); - if (!java.lang.reflect.Modifier.isPublic( modifiers )) { - throw new IllegalAccessException( - "The class " + class_metadata.getName() + " is not public and cannot be proxied." - ); - } - - // Create the proxy - Object proxy = java.lang.reflect.Proxy.newProxyInstance( - class_metadata.getClassLoader() - ,class_metadata.getInterfaces() - ,(proxy_object ,method ,args) -> { - Method original_method = class_metadata.getDeclaredMethod( - method.getName() - ,method.getParameterTypes() - ); - original_method.setAccessible( true ); - Object real_instance = class_metadata.getDeclaredConstructor().newInstance(); - return original_method.invoke( real_instance ,args ); - } - ); - - return proxy; - - } catch (Exception e) { - throw new RuntimeException( - "Failed to create proxy for class: " + class_metadata.getName() - ,e - ); - } - } - - -} diff --git "a/developer/javac\360\237\226\211/Mosaic_IO.java" "b/developer/javac\360\237\226\211/Mosaic_IO.java" new file mode 100644 index 0000000..fe6bdff --- /dev/null +++ "b/developer/javac\360\237\226\211/Mosaic_IO.java" @@ -0,0 +1,144 @@ +package com.ReasoningTechnology.Mosaic; +/* + The primary purpose of this class is to redirect I/O to buffers, + sot that a test can check the I/O behavior of a function under test. 
+*/ + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.FileOutputStream; +import java.io.FileInputStream; +import java.io.FileDescriptor; +import java.io.PrintStream; +import java.io.InputStream; + +public class Mosaic_IO{ + + private PrintStream original_out; + private PrintStream original_err; + private InputStream original_in; + + private ByteArrayOutputStream out_content; + private ByteArrayOutputStream err_content; + private ByteArrayInputStream in_content; + private Boolean streams_foobar = false; + private Boolean uninitialized = true; + + + // IO currently has no constructors defined, uses default + + + // Redirects IO streams, logs and handles errors if redirection fails. + // + // Most tests do not do I/O checks, so rather than throwing an error + // it will set the streams_foobar flag, then throw an error if the I/O + // functions are used. + // + // This is the only method that can set the streams_foobar flag. + public Boolean redirect(){ + + try{ + original_out = System.out; + original_err = System.err; + original_in = System.in; + + out_content = new ByteArrayOutputStream(); + err_content = new ByteArrayOutputStream(); + in_content = new ByteArrayInputStream(new byte[0]); + + System.setOut( new PrintStream(out_content) ); + System.setErr( new PrintStream(err_content) ); + System.setIn(in_content); + + uninitialized = false; + return true; + + } catch(Exception e){ + restore_hard(); + streams_foobar = true; + return false; + + } + } + + // Hard restore of the streams, resetting to system defaults + public void restore_hard(){ + System.setOut(new PrintStream( new FileOutputStream(FileDescriptor.out)) ); + System.setErr(new PrintStream( new FileOutputStream(FileDescriptor.err))) ; + System.setIn(new FileInputStream(FileDescriptor.in)); + } + + // Restores original IO streams, ensuring foobar and uninitialized states are checked. + // If anything goes wrong reverse to restore_hard. + public void restore(){ + if(uninitialized || streams_foobar){ + restore_hard(); + return; + } + try{ + System.setOut(original_out); + System.setErr(original_err); + System.setIn(original_in); + } catch(Throwable e){ + restore_hard(); + } + } + + // Clears output, error, and input buffers, checks for foobar state only. + public void clear_buffers(){ + if(streams_foobar){ + throw new IllegalStateException("Cannot clear buffers: IO object is in foobar state."); + } + out_content.reset(); + err_content.reset(); + in_content = new ByteArrayInputStream( new byte[0] ); // Reset to EOF + System.setIn(in_content); + } + + public Boolean has_out_content(){ + if(streams_foobar){ + throw new IllegalStateException + ( + "Cannot access stdout content: IO object is in foobar state." + ); + } + return out_content.size() > 0; + } + public String get_out_content(){ + if(streams_foobar){ + throw new IllegalStateException + ( + "Cannot access stdout content: IO object is in foobar state." + ); + } + return out_content.toString(); + } + + public Boolean has_err_content(){ + if(streams_foobar){ + throw new IllegalStateException + ( + "Cannot access stderr content: IO object is in foobar state." + ); + } + return err_content.size() > 0; + } + public String get_err_content(){ + if(streams_foobar){ + throw new IllegalStateException + ( + "Cannot access stderr content: IO object is in foobar state." + ); + } + return err_content.toString(); + } + + // Pushes input string onto stdin, checks foobar state only. 
+ public void push_input(String input_data){ + if(streams_foobar){ + throw new IllegalStateException("Cannot push input: IO object is in foobar state."); + } + in_content = new ByteArrayInputStream( input_data.getBytes() ); + System.setIn(in_content); + } +} diff --git "a/developer/javac\360\237\226\211/Mosaic_Mosaic.java" "b/developer/javac\360\237\226\211/Mosaic_Mosaic.java" new file mode 100644 index 0000000..51e57d0 --- /dev/null +++ "b/developer/javac\360\237\226\211/Mosaic_Mosaic.java" @@ -0,0 +1,27 @@ +package com.ReasoningTechnology.Mosaic; + +/* +The Mosaic shell callable wrapper is currently a placeholder. Perhaps someday we +can find something for this to do. + +*/ + + +public class Mosaic_Mosaic{ + + public static Boolean test_is_true(){ + return true; + } + + public static int run(){ + System.out.println("Main function placeholder. Currently Mosaic is used by extending the TestBench class."); + return 0; + } + + public static void main(String[] args){ + int return_code = run(); + System.exit(return_code); + return; + } + +} diff --git "a/developer/javac\360\237\226\211/Mosaic_Testbench.java" "b/developer/javac\360\237\226\211/Mosaic_Testbench.java" new file mode 100644 index 0000000..c8e0644 --- /dev/null +++ "b/developer/javac\360\237\226\211/Mosaic_Testbench.java" @@ -0,0 +1,106 @@ +package com.ReasoningTechnology.Mosaic; + +import java.lang.reflect.Method; + +public class Mosaic_Testbench { + + /* -------------------------------------------------------------------------------- + Validate the structure of a test method + */ + public static Boolean method_is_wellformed(Method method){ + // Check if the method returns Boolean + if(!method.getReturnType().equals(Boolean.class)){ + System.out.println("Structural problem: " + method.getName() + " does not return Boolean."); + return false; + } + + // Check if the method has exactly one argument of type Mosaic_IO + Class[] parameterTypes = method.getParameterTypes(); + if(parameterTypes == null || parameterTypes.length != 1 || !parameterTypes[0].equals(Mosaic_IO.class)){ + System.out.println("Structural problem: " + method.getName() + " does not accept a single Mosaic_IO argument."); + return false; + } + + return true; + } + + /* -------------------------------------------------------------------------------- + Run a single test method + */ + public static Boolean run_test(Object test_suite, Method method, Mosaic_IO io){ + String test_name = method.getName(); + + // Tracking possible test failures + Boolean fail_malformed = false; + Boolean fail_reported = false; + Boolean fail_exception = false; + Boolean fail_extraneous_stdout = false; + Boolean fail_extraneous_stderr = false; + String exception_string = ""; + + // Validate method structure + if(!method_is_wellformed(method)){ + System.out.println("Error: " + test_name + " has an invalid structure."); + return false; + } + + // Redirect I/O + Boolean successful_redirect = io.redirect(); + if(successful_redirect){ + io.clear_buffers(); // Start each test with empty buffers + } else { + Mosaic_Util.log_message(test_name, "Error: I/O redirection failed before running the test."); + System.out.println("Warning: Failed to redirect I/O for test: " + test_name); + } + + // Run the test and catch any exceptions + try{ + Object result = method.invoke(test_suite, io); + fail_reported = !Boolean.TRUE.equals(result); // Test passes only if it returns exactly `true` + fail_extraneous_stdout = io.has_out_content(); + fail_extraneous_stderr = io.has_err_content(); + } catch(Exception e){ + 
fail_exception = true; + exception_string = e.toString(); + } finally{ + io.restore(); + } + + // Report results + if(fail_reported) System.out.println("Test failed: '" + test_name + "' reported failure."); + if(fail_exception) System.out.println("Test failed: '" + test_name + "' threw an exception: " + exception_string); + if(fail_extraneous_stdout){ + System.out.println("Test failed: '" + test_name + "' produced extraneous stdout."); + Mosaic_Util.log_output(test_name, "stdout", io.get_out_content()); + } + if(fail_extraneous_stderr){ + System.out.println("Test failed: '" + test_name + "' produced extraneous stderr."); + Mosaic_Util.log_output(test_name, "stderr", io.get_err_content()); + } + + // Determine final test result + return !(fail_reported || fail_exception || fail_extraneous_stdout || fail_extraneous_stderr); + } + + /* -------------------------------------------------------------------------------- + Run all tests in the test suite + */ + public static int run(Object test_suite){ + int failed_tests = 0; + int passed_tests = 0; + Method[] methods = test_suite.getClass().getDeclaredMethods(); + Mosaic_IO io = new Mosaic_IO(); + + for(Method method : methods){ + if(run_test(test_suite, method, io)) passed_tests++; else failed_tests++; + } + + // Summary of test results + System.out.println("Total tests run: " + (passed_tests + failed_tests)); + System.out.println("Total tests passed: " + passed_tests); + System.out.println("Total tests failed: " + failed_tests); + + return (failed_tests > 0) ? 1 : 0; + } + +} diff --git "a/developer/javac\360\237\226\211/Mosaic_Util.java" "b/developer/javac\360\237\226\211/Mosaic_Util.java" new file mode 100644 index 0000000..bb6474f --- /dev/null +++ "b/developer/javac\360\237\226\211/Mosaic_Util.java" @@ -0,0 +1,126 @@ +package com.ReasoningTechnology.Mosaic; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.FileWriter; +import java.io.IOException; +import java.io.PrintStream; +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.lang.reflect.Proxy; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.util.function.Predicate; + + +public class Mosaic_Util{ + + // Linear search with a predicate + public static T find( T[] elements ,Predicate predicate ){ + for( T element : elements ){ + if( predicate.test( element )) return element; // Return the first match + } + return null; // Return null if no element satisfies the predicate + } + + // True when it does a search and finds a true value; otherwise false. + public static Boolean exists( Object[] elements ){ + return elements.length > 0 && find( elements ,element -> (element instanceof Boolean) && (Boolean) element ) != null; + } + + // True when it does a search and does not find a false value; otherwise false. 
+ public static Boolean all( Object[] elements ){ + return elements.length > 0 && find( elements ,element -> !(element instanceof Boolean) || !(Boolean) element ) == null; + } + + public static void all_set_false( Boolean[] condition_list ){ + int i = 0; + while(i < condition_list.length){ + condition_list[i] = false; + i++; + } + } + + public static void all_set_true( Boolean[] condition_list ){ + int i = 0; + while(i < condition_list.length){ + condition_list[i] = true; + i++; + } + } + + public static String iso_utc_time(){ + return Instant.now().atOffset(ZoneOffset.UTC).format(DateTimeFormatter.ISO_INSTANT); + } + + // used to report if a test completed with data still on an output streams + public static void log_output(String test_name ,String stream ,String output_data){ + try(FileWriter log_writer = new FileWriter("test_log.txt" ,true)){ // Append mode + log_writer.write("\n" + iso_utc_time() + " -----------------------------------------------------------\n"); + log_writer.write("Test: " + test_name + "\n"); + log_writer.write("Stream: " + stream + "\n"); + log_writer.write("Output:\n" + output_data + "\n"); + } catch(IOException e) { + System.err.println("Error writing to log for test: " + test_name + ", stream: " + stream); + e.printStackTrace(System.err); + } + } + + // used to log a general message about a test + public static void log_message(String test_name ,String message){ + try(FileWriter log_writer = new FileWriter("test_log.txt" ,true)){ // Append mode + log_writer.write("\n" + iso_utc_time() + " -----------------------------------------------------------\n"); + log_writer.write("Test: " + test_name + "\n"); + log_writer.write("Message:\n" + message + "\n"); + } catch(IOException e){ + System.err.println + ( + "Error writing message \"" + + message + + "\" to log for test \'" + + test_name + + "\'" + ); + e.printStackTrace(System.err); + } + } + + public static Object make_all_public_methods_proxy( Class class_metadata ) { + try { + // Check if the class is public + int modifiers = class_metadata.getModifiers(); + if (!java.lang.reflect.Modifier.isPublic( modifiers )) { + throw new IllegalAccessException( + "The class " + class_metadata.getName() + " is not public and cannot be proxied." 
+ ); + } + + // Create the proxy + Object proxy = java.lang.reflect.Proxy.newProxyInstance( + class_metadata.getClassLoader() + ,class_metadata.getInterfaces() + ,(proxy_object ,method ,args) -> { + Method original_method = class_metadata.getDeclaredMethod( + method.getName() + ,method.getParameterTypes() + ); + original_method.setAccessible( true ); + Object real_instance = class_metadata.getDeclaredConstructor().newInstance(); + return original_method.invoke( real_instance ,args ); + } + ); + + return proxy; + + } catch (Exception e) { + throw new RuntimeException( + "Failed to create proxy for class: " + class_metadata.getName() + ,e + ); + } + } + + +} diff --git a/developer/shell/.githolder b/developer/shell/.githolder deleted file mode 100644 index e69de29..0000000 diff --git a/developer/shell/Mosaic b/developer/shell/Mosaic deleted file mode 100755 index ba5b241..0000000 --- a/developer/shell/Mosaic +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -java com.ReasoningTechnology."Mosaic".Mosaic diff --git a/developer/tool/clean_build_directories b/developer/tool/clean_build_directories deleted file mode 100755 index 7091d81..0000000 --- a/developer/tool/clean_build_directories +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# Removes all files found in the build directories. It asks no questions as to -# how or why the files got there. Be especially careful with the 'shell' -# directory if you have authored scripts for release, add a `shell-leaf` -# directory instead of putting them in `shell`. - -# input guards - env_must_be="developer/tool/env" - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 - fi - -# remove files - set -x - cd "$REPO_HOME"/developer - rm -r scratchpad/* - rm jvm/* - rm shell/* - set +x - -echo "$(script_fn) done." - diff --git a/developer/tool/clean_javac_output b/developer/tool/clean_javac_output deleted file mode 100755 index 5ebeb51..0000000 --- a/developer/tool/clean_javac_output +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") -# remove all files created by make's call to `javac` - -# input guards - env_must_be="developer/tool/env" - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 - fi - -# remove files - set -x - cd "$REPO_HOME"/developer - rm -r scratchpad/com/ReasoningTechnology/"$PROJECT" - set +x - -echo "$(script_fn) done." diff --git a/developer/tool/clean_make_output b/developer/tool/clean_make_output deleted file mode 100755 index a7c6ebf..0000000 --- a/developer/tool/clean_make_output +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") -# remove all files made by `make` - -# input guards - - env_must_be="developer/tool/env" - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 - fi - -# wrappers to clean (this list space separated list will grow) - - wrapper=$(shell_wrapper_list) - -# remove files - - set -x - cd "$REPO_HOME"/developer - rm -r scratchpad/com/ReasoningTechnology/"$PROJECT" - rm jvm/"$PROJECT".jar - rm shell/{$wrapper} - set +x - -echo "$(script_fn) done." 
diff --git a/developer/tool/clean_release b/developer/tool/clean_release deleted file mode 100755 index a33f19a..0000000 --- a/developer/tool/clean_release +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") -# remove files made by `make` and by `release` - -# input guards - - env_must_be="developer/tool/env" - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 - fi - -# things to clean - - release_dir="$REPO_HOME"/release - wrapper=$(shell_wrapper_list) - -# remove files - set -x - cd "$REPO_HOME"/developer - rm -r scratchpad/com/ReasoningTechnology/"$PROJECT" - rm jvm/"$PROJECT".jar - rm shell/{$wrapper} - rm -f "$release_dir"/"$PROJECT".jar - rm -f "$release_dir"/{$wrapper} - set +x - -echo "$(script_fn) done." - diff --git a/developer/tool/distribute_source b/developer/tool/distribute_source deleted file mode 100755 index faf844d..0000000 --- a/developer/tool/distribute_source +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# This script links the sources into the directory tree in parallel to the package. - -# Input guards - - env_must_be="developer/tool/env" - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 - fi - - cd "$REPO_HOME"/developer - -# Link sources into the package tree - - package_tree="scratchpad/com/ReasoningTechnology/$PROJECT" - mkdir -p "$package_tree" - echo "Package: $package_tree" - - echo -n "Linking:" - for source_file in javac/*.java; do - echo -n " $(basename "$source_file")" - link_target="$package_tree/$(basename "$source_file")" - if [ ! -L "$link_target" ]; then - ln -s "$(realpath --relative-to="$package_tree" "$source_file")" "$link_target" - fi - done - echo "." - -echo "$(script_fp) done." diff --git a/developer/tool/env b/developer/tool/env deleted file mode 100644 index 66446d4..0000000 --- a/developer/tool/env +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# input guards - - env_must_be="tool_shared/bespoke/env" - error=false - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - error=true - fi - if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then - echo "$script_afp:: This script must be sourced, not executed." - error=true - fi - if $error; then exit 1; fi - -# so we can do the build - -export PATH=\ -"$REPO_HOME"/developer/tool/\ -:"$REPO_HOME"/tool_shared/bespoke/\ -:"$JAVA_HOME"/bin\ -:"$PATH" - -# so we can run the stuff we built locally. 
- -export CLASSPATH=\ -"$REPO_HOME"/developer/jvm\ -:"$REPO_HOME"/developer/jvm/"$PROJECT".jar\ -:"$JAVA_HOME"/lib\ -:"$CLASSPATH" - -export PATH=\ -"$REPO_HOME"/developer/shell\ -:"$PATH" - -# misc - - # make .githolder and .gitignore visible - alias ls="ls -a" - -# some feedback to show all went well - - export PROMPT_DECOR="$PROJECT"_developer - export ENV=$(script_fp) - echo ENV "$ENV" - cd "$REPO_HOME"/developer/ - - - diff --git a/developer/tool/make b/developer/tool/make deleted file mode 100755 index be84a0e..0000000 --- a/developer/tool/make +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# input guards - - env_must_be="developer/tool/env" - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 - fi - - cd "$REPO_HOME"/developer - -echo "Compiling files..." - set -x - javac -g -d scratchpad javac/*.java - set +x - if [ $? -ne 0 ]; then - echo "Compilation failed." - exit 1 - fi - -echo "Creating JAR file..." - set -x - jar_file=jvm/"$PROJECT".jar - mkdir -p jvm - jar cf $jar_file -C scratchpad . - set +x - if [ $? -eq 0 ]; then - echo "JAR file created successfully: $jar_file" - else - echo "Failed to create JAR file." - exit 1 - fi - -echo "Creating shell wrappers..." - mkdir -p shell - # wrapper is a space separated list - wrapper=$(shell_wrapper_list) - for file in $wrapper;do - cat > shell/$file << EOL -#!/bin/bash -java com.ReasoningTechnology."$PROJECT".$file -EOL - chmod +x shell/$file - done - -echo "$(script_fp) done." - diff --git a/developer/tool/release b/developer/tool/release deleted file mode 100755 index bcf4686..0000000 --- a/developer/tool/release +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# input guards - - if [ -z "$REPO_HOME" ]; then - echo "$(script_fp):: REPO_HOME is not set." - exit 1 - fi - - env_must_be="developer/tool/env" - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 - fi - -# script local environment - - release_dir="$REPO_HOME/release" - shell_dir="$REPO_HOME/developer/shell" - project_jar_fp="$REPO_HOME/developer/jvm/"$PROJECT".jar" - wrapper=$(shell_wrapper_list) - - - if [ ! -d "$release_dir" ]; then - mkdir -p "$release_dir" - fi - - # Function to copy and set permissions - install_file() { - source_fp="$1" - target_dp="$2" - perms="$3" - - target_file="$target_dp/$(basename "$source_fp")" - - if [ ! -f "$source_fp" ]; then - echo "install_file:: Source file '$source_fp' does not exist." - return 1 - fi - - if ! install -m "$perms" "$source_fp" "$target_file"; then - echo "Error: Failed to install $(basename "$source_fp") to $target_dp" - exit 1 - else - echo "Installed $(basename "$source_fp") to $target_dp with permissions $perms" - fi - } - -# do the release - - echo "Starting release process..." - - # Install the JAR file - install_file "$project_jar_fp" "$release_dir" "ug+r" - - # Install shell wrappers - for wrapper in $wrapper; do - install_file "$shell_dir/$wrapper" "$release_dir" "ug+r+x" - done - -echo "$(script_fp) done." 
diff --git a/developer/tool/shell_wrapper_list b/developer/tool/shell_wrapper_list deleted file mode 100755 index c95affe..0000000 --- a/developer/tool/shell_wrapper_list +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# input guards - - env_must_be="developer/tool/env" - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 - fi - - cd "$REPO_HOME"/developer - -# list of classes that have main calls and get shell wrappers -echo Mosaic diff --git "a/developer/tool\360\237\226\211/clean_build_directories" "b/developer/tool\360\237\226\211/clean_build_directories" new file mode 100755 index 0000000..7091d81 --- /dev/null +++ "b/developer/tool\360\237\226\211/clean_build_directories" @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") + +# Removes all files found in the build directories. It asks no questions as to +# how or why the files got there. Be especially careful with the 'shell' +# directory if you have authored scripts for release, add a `shell-leaf` +# directory instead of putting them in `shell`. + +# input guards + env_must_be="developer/tool/env" + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + exit 1 + fi + +# remove files + set -x + cd "$REPO_HOME"/developer + rm -r scratchpad/* + rm jvm/* + rm shell/* + set +x + +echo "$(script_fn) done." + diff --git "a/developer/tool\360\237\226\211/clean_javac_output" "b/developer/tool\360\237\226\211/clean_javac_output" new file mode 100755 index 0000000..5ebeb51 --- /dev/null +++ "b/developer/tool\360\237\226\211/clean_javac_output" @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") +# remove all files created by make's call to `javac` + +# input guards + env_must_be="developer/tool/env" + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + exit 1 + fi + +# remove files + set -x + cd "$REPO_HOME"/developer + rm -r scratchpad/com/ReasoningTechnology/"$PROJECT" + set +x + +echo "$(script_fn) done." diff --git "a/developer/tool\360\237\226\211/clean_make_output" "b/developer/tool\360\237\226\211/clean_make_output" new file mode 100755 index 0000000..a7c6ebf --- /dev/null +++ "b/developer/tool\360\237\226\211/clean_make_output" @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") +# remove all files made by `make` + +# input guards + + env_must_be="developer/tool/env" + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + exit 1 + fi + +# wrappers to clean (this list space separated list will grow) + + wrapper=$(shell_wrapper_list) + +# remove files + + set -x + cd "$REPO_HOME"/developer + rm -r scratchpad/com/ReasoningTechnology/"$PROJECT" + rm jvm/"$PROJECT".jar + rm shell/{$wrapper} + set +x + +echo "$(script_fn) done." 
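
Section 4 of `the_build_environmet.txt` above outlines the debug flow: compile with `-g`, then link the sources into the scratchpad package tree so `jdb` can display them. A condensed sketch using the tools in this directory; the final `jdb` invocation is illustrative, with the main-bearing class name taken from Mosaic_Mosaic.java:

  make                 # javac -g -d scratchpad ... ; class files carry source references
  distribute_source    # symlink javac/*.java into scratchpad/com/ReasoningTechnology/Mosaic
  jdb com.ReasoningTechnology.Mosaic.Mosaic_Mosaic   # single-step with the linked sources visible
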
diff --git "a/developer/tool\360\237\226\211/clean_release" "b/developer/tool\360\237\226\211/clean_release" new file mode 100755 index 0000000..a33f19a --- /dev/null +++ "b/developer/tool\360\237\226\211/clean_release" @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") +# remove files made by `make` and by `release` + +# input guards + + env_must_be="developer/tool/env" + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + exit 1 + fi + +# things to clean + + release_dir="$REPO_HOME"/release + wrapper=$(shell_wrapper_list) + +# remove files + set -x + cd "$REPO_HOME"/developer + rm -r scratchpad/com/ReasoningTechnology/"$PROJECT" + rm jvm/"$PROJECT".jar + rm shell/{$wrapper} + rm -f "$release_dir"/"$PROJECT".jar + rm -f "$release_dir"/{$wrapper} + set +x + +echo "$(script_fn) done." + diff --git "a/developer/tool\360\237\226\211/distribute_source" "b/developer/tool\360\237\226\211/distribute_source" new file mode 100755 index 0000000..faf844d --- /dev/null +++ "b/developer/tool\360\237\226\211/distribute_source" @@ -0,0 +1,32 @@ +#!/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") + +# This script links the sources into the directory tree in parallel to the package. + +# Input guards + + env_must_be="developer/tool/env" + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + exit 1 + fi + + cd "$REPO_HOME"/developer + +# Link sources into the package tree + + package_tree="scratchpad/com/ReasoningTechnology/$PROJECT" + mkdir -p "$package_tree" + echo "Package: $package_tree" + + echo -n "Linking:" + for source_file in javac/*.java; do + echo -n " $(basename "$source_file")" + link_target="$package_tree/$(basename "$source_file")" + if [ ! -L "$link_target" ]; then + ln -s "$(realpath --relative-to="$package_tree" "$source_file")" "$link_target" + fi + done + echo "." + +echo "$(script_fp) done." diff --git "a/developer/tool\360\237\226\211/env" "b/developer/tool\360\237\226\211/env" new file mode 100644 index 0000000..66446d4 --- /dev/null +++ "b/developer/tool\360\237\226\211/env" @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") + +# input guards + + env_must_be="tool_shared/bespoke/env" + error=false + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + error=true + fi + if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then + echo "$script_afp:: This script must be sourced, not executed." + error=true + fi + if $error; then exit 1; fi + +# so we can do the build + +export PATH=\ +"$REPO_HOME"/developer/tool/\ +:"$REPO_HOME"/tool_shared/bespoke/\ +:"$JAVA_HOME"/bin\ +:"$PATH" + +# so we can run the stuff we built locally. 
+ +export CLASSPATH=\ +"$REPO_HOME"/developer/jvm\ +:"$REPO_HOME"/developer/jvm/"$PROJECT".jar\ +:"$JAVA_HOME"/lib\ +:"$CLASSPATH" + +export PATH=\ +"$REPO_HOME"/developer/shell\ +:"$PATH" + +# misc + + # make .githolder and .gitignore visible + alias ls="ls -a" + +# some feedback to show all went well + + export PROMPT_DECOR="$PROJECT"_developer + export ENV=$(script_fp) + echo ENV "$ENV" + cd "$REPO_HOME"/developer/ + + + diff --git "a/developer/tool\360\237\226\211/make" "b/developer/tool\360\237\226\211/make" new file mode 100755 index 0000000..be84a0e --- /dev/null +++ "b/developer/tool\360\237\226\211/make" @@ -0,0 +1,49 @@ +#!/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") + +# input guards + + env_must_be="developer/tool/env" + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + exit 1 + fi + + cd "$REPO_HOME"/developer + +echo "Compiling files..." + set -x + javac -g -d scratchpad javac/*.java + set +x + if [ $? -ne 0 ]; then + echo "Compilation failed." + exit 1 + fi + +echo "Creating JAR file..." + set -x + jar_file=jvm/"$PROJECT".jar + mkdir -p jvm + jar cf $jar_file -C scratchpad . + set +x + if [ $? -eq 0 ]; then + echo "JAR file created successfully: $jar_file" + else + echo "Failed to create JAR file." + exit 1 + fi + +echo "Creating shell wrappers..." + mkdir -p shell + # wrapper is a space separated list + wrapper=$(shell_wrapper_list) + for file in $wrapper;do + cat > shell/$file << EOL +#!/bin/bash +java com.ReasoningTechnology."$PROJECT".$file +EOL + chmod +x shell/$file + done + +echo "$(script_fp) done." + diff --git "a/developer/tool\360\237\226\211/release" "b/developer/tool\360\237\226\211/release" new file mode 100755 index 0000000..bcf4686 --- /dev/null +++ "b/developer/tool\360\237\226\211/release" @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") + +# input guards + + if [ -z "$REPO_HOME" ]; then + echo "$(script_fp):: REPO_HOME is not set." + exit 1 + fi + + env_must_be="developer/tool/env" + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + exit 1 + fi + +# script local environment + + release_dir="$REPO_HOME/release" + shell_dir="$REPO_HOME/developer/shell" + project_jar_fp="$REPO_HOME/developer/jvm/"$PROJECT".jar" + wrapper=$(shell_wrapper_list) + + + if [ ! -d "$release_dir" ]; then + mkdir -p "$release_dir" + fi + + # Function to copy and set permissions + install_file() { + source_fp="$1" + target_dp="$2" + perms="$3" + + target_file="$target_dp/$(basename "$source_fp")" + + if [ ! -f "$source_fp" ]; then + echo "install_file:: Source file '$source_fp' does not exist." + return 1 + fi + + if ! install -m "$perms" "$source_fp" "$target_file"; then + echo "Error: Failed to install $(basename "$source_fp") to $target_dp" + exit 1 + else + echo "Installed $(basename "$source_fp") to $target_dp with permissions $perms" + fi + } + +# do the release + + echo "Starting release process..." + + # Install the JAR file + install_file "$project_jar_fp" "$release_dir" "ug+r" + + # Install shell wrappers + for wrapper in $wrapper; do + install_file "$shell_dir/$wrapper" "$release_dir" "ug+r+x" + done + +echo "$(script_fp) done." 
diff --git "a/developer/tool\360\237\226\211/shell_wrapper_list" "b/developer/tool\360\237\226\211/shell_wrapper_list" new file mode 100755 index 0000000..c95affe --- /dev/null +++ "b/developer/tool\360\237\226\211/shell_wrapper_list" @@ -0,0 +1,15 @@ +#!/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") + +# input guards + + env_must_be="developer/tool/env" + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + exit 1 + fi + + cd "$REPO_HOME"/developer + +# list of classes that have main calls and get shell wrappers +echo Mosaic diff --git a/document/An_Introduction_to_Structured_Testing.html b/document/An_Introduction_to_Structured_Testing.html deleted file mode 100644 index 384beb2..0000000 --- a/document/An_Introduction_to_Structured_Testing.html +++ /dev/null @@ -1,1034 +0,0 @@ - - - - - - - White Box Testing - Mosaic Project - - - -
-
-

An Introduction to Structured Testing

-

© 2024 Thomas Walker Lynch - All Rights Reserved.

-
- - -

Introduction

- -

This guide provides a general overview of testing concepts. It is - not a reference manual for the Mosaic Testbench itself. At the - time of writing, no such reference document exists, so developers and - testers are advised to consult the source code directly for implementation - details. A small example can be found in the Test_MockClass - file within the tester directory. Other examples can be found in projects - that make use of Mosaic.

- -

A typical testing setup comprises three main components: - the Testbench, the test - routines, and a collection of units under - test (UUTs). Here, a UUT is any individual software or hardware - component intended for testing. Because this guide focuses on software, we - use the term RUT (routine under test) to denote - the unit under test in software contexts. Although we use software-centric - terminology, the principles outlined here apply equally to hardware - testing.

- -

Each test routine supplies inputs to a RUT, collects the resulting - outputs, and determines whether the test passes or fails based on those - values. A given test routine might repeat this procedure for any number - of test cases. The final result from the test - routine is then relayed to the Testbench. Testers and developers write - the test routines and place them into the Testbench.
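To make this concrete, here is a minimal sketch of such a test routine in C. It is not the Mosaic interface itself (for that, consult the source as noted above); the harness that calls it and gathers the boolean result is assumed, not shown.

    #include <stdbool.h>

    /* The RUT: a routine under test. */
    int rut_double(int x){ return 2 * x; }

    /* A test routine: it supplies inputs, collects outputs, and decides
       pass/fail over several test cases.  The harness that calls it and
       gathers the boolean result is assumed, not shown. */
    bool test_rut_double(void){
      int inputs[]   = {0, 1, -3, 100};
      int expected[] = {0, 2, -6, 200};
      for(int i = 0; i < 4; i++){
        int observed = rut_double(inputs[i]);
        if(observed != expected[i]) return false;  /* a test case failed */
      }
      return true;                                 /* all test cases passed */
    }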

- -

Mosaic is a Testbench. It serves as a structured environment for organizing and executing test routines, and it provides a library of utility routines for assisting the test writer. When run, the Testbench sequences through the set of test routines, one by one, providing each test routine with an interface to control and examine standard input and output. Each test routine, depending on its design, might in turn sequence through test cases. During execution, the Testbench records pass/fail results, lists the names of the test routines that failed, and generates a summary report with pass/fail totals.

- -

At the time of this writing, Mosaic does not provide features for - breaking up large test runs into parallel pieces and then load balancing - those pieces. Perhaps such a feature will be developed for a future version. - However, this does not prevent an enterprising tester from running multiple - Mosaic runs with different test routines in parallel in an ad hoc manner, or - with other tools.

- -

Function versus Routine

- -

A routine is an encapsulated sequence of instructions, with a symbol - table for local variables, and an interface for importing and exporting - data through the encapsulation boundary. This interface - maps arguments from a caller - to parameters within the routine, enabling data - transfer at runtime. In the context of testing, the arguments that bring - data into the routine are referred to as - inputs, while those that carry data out are called - outputs. Notably, in programming, outputs are often called - return values.

- -

In computer science, a pure function is a routine - in which outputs depend solely on the provided inputs, without reference to - any internal state or memory that would persist across calls. A pure function - produces the same output given the same inputs every time it is called. - Side effects, such as changes to external states or reliance on external - resources, are not present in pure functions; any necessary interactions - with external data must be represented explicitly as inputs or outputs. - By definition, a function produces a single output, though this output can - be a collection, such as a vector or set.
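As a small sketch, the first routine below is a pure function, while the second is not, because its output also depends on state that persists across calls.

    /* Pure: the output depends only on the inputs. */
    int add(int a, int b){ return a + b; }

    /* Not pure: the output also depends on how many times it has been called. */
    int add_and_count(int a, int b){
      static int call_count = 0;
      call_count = call_count + 1;
      return a + b + call_count;
    }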

- -

Routines with internal state variables that facilitate temporal behavior - can produce outputs that depend on the sequence and values of prior - inputs. This characteristic makes such routines challenging to - test. Generally, better testing results are achieved when testing pure - functions, where outputs depend only on current inputs.

- - -

Block and Integration

- -

A test routine provides inputs to a RUT and collects its outputs, often - doing so repeatedly in a sequence of test cases. The test routine then - evaluates these values to determine if the test has passed or failed.

- -

When a test routine evaluates a RUT that corresponds to a single function - or module within the program, it performs a block - test.

- -

When a test routine evaluates a RUT that encompasses multiple program - components working together, it is conducting - an integration test.

- -

Integration tests typically involve combining substantial components of a program that were developed independently. Such tests often occur late in the project timeline, where they can reveal complex and unforeseen interactions between components when there is not adequate time to deal with them. To help address these challenges, some software development methodologies recommend instead introducing simplified versions of large components early in the development process and then refining them over time.

- -

Failures and Faults

- -

A test routine has two primary responsibilities: first, supplying inputs to and collecting outputs from the RUT; and second, determining whether the RUT passed or failed the test. This second responsibility is handled by the failure decider. Even when the failure decider is not an explicit function in the test routine, its functionality will still be present in the test routine's logic.

- -

A given failure decider might produce false positive or false negative results. A false positive occurs when the failure decider indicates that a test has passed when it should have failed; hence, this is also known as a false pass. Conversely, a false negative occurs when the decider indicates failure when the test should have passed; hence, this is also known as a false fail. An ideal failure decider would produce neither false passes nor false fails.

- -

In a typical testing workflow, passing tests receive no further - scrutiny. In contrast, failed tests are further examined to locate the - underlying fault. Thus, for such a workflow, false fails are likely to be - caught in the debugger, while false passes might go undetected until - release, then be discovered by users. Early in the project timeline, this - effect can be mitigated by giving passing cases more scrutiny, essentially - spot-checking the test environment. Later, in regression testing, the volume - of passing cases causes spot-checking to be ineffective. Alternative - strategies include redundant testing, better design of the failure decider, - or employing other verification techniques.

- -

A failure occurs when there is a deviation between the observed output from a RUT and the ideal output. When the ideal output is unavailable, a reference output is often used in its place. When using reference outputs, the accuracy of test results depends on both the accuracy of the failure decider and the accuracy of the reference outputs themselves.

- -

Some testers will refer to an observed output as an actual output. Additionally, some testers will call reference outputs golden values, particularly when those values are considered highly accurate. However, the terminology introduced earlier aligns more closely with that used in scientific experiments, which is fitting since testing is a form of experimentation.

- -

A fault is a flaw in the design, implementation, or realization of a - product that, if fixed, would eliminate the potential for a failure to be - observed. Faults are often localized to a specific point, but they can also - result from the mishandling of a confluence of events that arise during - product operation.

- -

The goal of testing is to create conditions that make failures observable. Once a failure is observed, it is the responsibility of developers, or testers in a development role, to debug these failures, locate the faults, and implement fixes.

- -

Root cause analysis extends beyond the scope of development and test. It - involves examining project workflows to understand why a fault exists in the - product. Typically, root cause analysis will identify a root cause that, if - "fixed," would not eliminate the potential for a failure to be observed in - the current or near-term releases. Consequently, root cause analysis is - generally not a priority for design and testing but instead falls within the - domain of project management.

- -

A technique commonly used to increase the variety of conditions—and thus the likelihood of creating conditions that reveal faults—is to run more tests with different inputs. This is called increasing the test coverage.

- -

The Mosaic tool assists testers in finding failures, but it does not directly help with identifying the underlying fault that led to the failure. Mosaic is a tool for testers. However, these two tasks—finding failures and locating faults—are not entirely separate. Knowing where a failure occurs can provide the developer with a good starting point for locating the fault and help narrow down possible causes. Additionally, once a developer claims to have fixed a fault, that claim can be verified through further testing.

- -

Testing Objectives

- -
    -
  • - Verification Testing
    - Purpose: To confirm that the software or system meets the specified requirements and design. Verification testing ensures that each component behaves as expected according to specifications, often conducted throughout development to catch any deviations from the original plan. -
  • - -
  • - Regression Testing
    - Purpose: To ensure that recent changes or additions to the codebase have not introduced new errors. This type of testing checks that previously tested functionalities still work as intended, making it essential for maintaining stability as updates are made. -
  • - -
  • - Development Testing
    - Purpose: To evaluate code correctness and functionality during the development process. Development testing is often exploratory, allowing developers to check whether their code performs as expected before formal testing. It can include unit testing, integration testing, and other quick checks to validate functionality on the fly. -
  • - -
  • - Exploratory Testing
    - Purpose: To uncover unexpected issues by testing the software in an unscripted manner. Exploratory testing allows testers to investigate the software's behavior outside of planned test cases, often discovering edge cases or flaws that structured tests may miss. -
  • - -
  • - Performance Testing
    - Purpose: To assess how the software performs under expected and extreme conditions. Performance testing evaluates response times, resource usage, and stability, often covering areas like load, stress, and scalability testing. This objective ensures the system can handle the demands it will face in production. -
  • - -
  • - Compliance Testing
    - Purpose: To confirm that the software adheres to regulatory, legal, and industry standards. Compliance testing ensures that the system meets external requirements, which may include accessibility, data privacy, and industry-specific standards. -
  • - -
  • - Security Testing
    - Purpose: To identify vulnerabilities and ensure the software is protected against unauthorized access and threats. Security testing checks for risks like data breaches, weak authentication, and exposure to known vulnerabilities, helping to safeguard sensitive information and user privacy. -
  • - -
  • - Compatibility Testing
    - Purpose: To verify that the software works across different environments, devices, and platforms. Compatibility testing ensures consistent functionality and appearance across browsers, operating systems, hardware configurations, and other setups. -
  • - -
  • - Acceptance Testing
    - Purpose: To determine if the software meets the end user's needs and expectations. Acceptance testing, often conducted by stakeholders or QA teams, validates that the software is usable and functional from a real-world perspective, acting as the final check before release. -
  • - -
  • - Documentation Testing
    - Purpose: To ensure that all documentation, guides, and user manuals are accurate and reflect the current software functionality. Documentation testing verifies that users have clear, up-to-date information for effective usage and troubleshooting. -
  • - -
  • - Usability Testing
    - Purpose: To confirm that the software is user-friendly and intuitive. Usability testing focuses on the ease of use, ensuring that end users can navigate and interact with the software without unnecessary friction, leading to a positive user experience. -
  • - -
- -

The Mosaic Testbench is useful for any type of testing that can be formulated as test routines testing RUTs. This certainly includes verification, regression, development, and exploratory testing. It also includes the portions of performance, compliance, security, compatibility, and acceptance testing that fit the model of test routines and RUTs. Only recently has it become imaginable that the Mosaic Testbench could be used for documentation testing; however, it is now possible to fit an AI API into a test routine and turn a document into a RUT. Usability testing often depends on other types of tests, so to this extent the Mosaic Testbench can play a role. However, usability is also, in part, feedback from users. So, short of putting users in the Matrix, this portion of usability testing remains outside the domain of the Mosaic Testbench, though the Testbench could still be used to reduce surveys to pass/fails.

- -

Each test objective will lead to writing tests of a different nature.

- - -

Unstructured Testing

- -

This section outlines some common approaches - to unstructured testing, often referred to - as black box testing. Black boxes are inherent - in even the most structured testing approaches, as at the lowest levels of - analysis, elements will always remain opaque. Even in the most highly - detailed test of logic possible, one that examines a RUT down to the - individual logic gates, each gate would be treated as a black box.

- -

Reference Output Based Testing

- -

In reference output based testing, an ordering - is assigned to the inputs for - the routine under test, as well as to - its outputs. Through this ordering the inputs - and outputs become vectors. Thus the routine under test is given - an input vector and it returns - an observed output vector.

- -

A Reference Model is then - given the same input vector, and then it - produces a reference output vector. The reference - output vector has the same component ordering as the - observed output vector. - -

The failure detection function then compares - each observed output vector with its corresponding reference output vector. If - they do not match, the test is deemed to have failed.

- -

It follows that in reference output based testing, the accuracy of the - test results depends solely on the accuracy of the Reference Model.
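A sketch of this arrangement follows, with the RUT and the Reference Model left as declarations, and with an output vector of two components chosen only for illustration.

    #include <stdbool.h>

    typedef struct { int a, b; }          input_vector;
    typedef struct { int first, second; } output_vector;

    output_vector rut(input_vector in);              /* routine under test     */
    output_vector reference_model(input_vector in);  /* independent reference  */

    /* Failure decider for reference output based testing: a test case fails
       when the observed and reference output vectors differ in any component. */
    bool test_case_passes(input_vector in){
      output_vector observed  = rut(in);
      output_vector reference = reference_model(in);
      return observed.first  == reference.first
          && observed.second == reference.second;
    }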

- -

When the implementation of the Reference Model is unrelated to the routine under test, we tend to expect that the errors produced by the Reference Model will be uncorrelated with those produced by the routine under test, and thus unlikely to coincide. This property biases test routines towards delivering false fails. As noted earlier, false fails are likely to be caught, as test fails are followed up with further scrutiny. It follows that reference output based testing can potentially deliver a high degree of accuracy even when the Reference Model is not ideal.

- -

Property Check Testing

- -

Property Check Testing is an alternative to - reference output based testing. Here, rather than comparing each observed - output to a reference output, the observed output is validated against - known properties or expected characteristics.

- -

For example, given an integer as input, a function that correctly squares - this input will preserve the parity of the input, as an odd number squared - will be odd, and an even number squared will be even. The failure decider - can check this property for each test case, and if it does not hold, the - test case fails.
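A sketch of such a failure decider follows, with the square routine left as a declaration.

    #include <stdbool.h>
    #include <stdint.h>

    int64_t rut_square(int64_t x);   /* routine under test, declaration only */

    /* Property check: squaring preserves parity.  No reference output is
       needed; the observed output is checked against the property alone. */
    bool square_parity_holds(int64_t x){
      int64_t observed = rut_square(x);
      bool input_odd  = (x % 2) != 0;
      bool output_odd = (observed % 2) != 0;
      return input_odd == output_odd;
    }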

- -

Note for the square RUT test, this proposed property check is weak. Given - a uniform distribution, half the time an errant square will still have the - correct parity. There are stronger property checks that could be done for - squares, but the point here is one of illustration. A weak property check - would not recognize many failures, and thus be biased towards false pass - decisions. Those are the bad ones, as passing tests typically receive no - further scrutiny.

- -

Spot Checking

- -

In spot checking, the function under test is checked against one or two - input vectors. When using a black box approach, these are chosen at - random.

- -

Moving from zero to one is an infinite relative change: running a program for the first time requires that many moving parts work together, parts that have never been tried before; hence, a tremendous amount is learned about the logic and setup when the first test runs. Such a first test is called a smoke test, a term that has literal meaning in the field of electronics testing.

- -

Exhaustive Testing

- -

A test routine will potentially run multiple test cases against a given - RUT. If the RUT is a pure function, then per test case, a single test - vector will be given to the RUT, and a single output vector will be - returned. However, if the RUT is sequential in nature, for each test case - there will be a sequence of input vectors, and potentially a sequence of - output vectors.

- -

The set of possible inputs for a RUT, where the members are either individual vectors or vector sequences, constitutes the input space. Test coverage is typically given as the proportion of inputs tested to the total in the input space, reported as a percentage.

When the RUT is a pure function, the input space is an enumeration of all possible input vectors. If the inputs include arbitrarily long strings, then it will not be possible to complete such an enumeration; the best that can be done is to generate more and more inputs upon demand.

- -

When the RUT has sequential behavior, achieving full coverage requires - giving the RUT every possible starting input, and then sequencing it to a - point of hitting a stop state or cycle state in every possible way. Again - if inputs can be arbitrarily long strings, such an enumeration can not be - completed. Furthermore, if the RUT state is encapsulated unseen in a black - box, it might be very difficult, or impossible, to detect when the state - has cycled.

- -

Exhaustive testing is said to have been - done when every single input in the input space has been tested. - An exhaustive test will have obtained 100% coverage, with no rounding - done in the coverage computation.

- -

Suppose that a fault appears at time t₀. Suppose there is a duration of - time of interest, Δ, that begins at or later than t₀. Suppose further - there exists a given test and test case that fails due to the fault, but - would not otherwise fail. Then a failure is - reproducible during Δ, if and only if the given test and test case - would fail if run at any time during Δ, and no matter how many times it is - run.

- -

For a RUT that is a pure function, this definition is the same as saying the test case fails at the same input value every time during Δ, when ideally it should have passed. For a sequential RUT, it is saying that the same input vector sequence will always lead to a failure, when ideally it would lead to a pass.

- -

Although the same test routine is run with identical inputs, a failure - might not be reproducible due to other sources of variability, as - examples:

-
    -
  1. The contract made with the programmer for using the exact same - inputs for the exact same test routine was broken. -
  2. Use of uninitialized memory. -
  3. Software updates or platform changes in between test runs during Δ. -
  4. Green thread, or real thread, scheduling differences, whether done by the OS or by the interpreter. -
  5. Using the system time as data, or other system parameter. -
  6. Race conditions. -
  7. Getting values from a randomly seeded pseudo random number generator.
  8. Reaching out of the architecture model for values, for example by using performance measures or by timing events.
  9. A hardware fault that is sensitive to a myriad of possible environmental influences.
- -

Exhaustive testing will find all failures that are reproducible. It might - find failures that are not reproducible. The probability of witnessing - non-reproducible failures will typically go up when using the technique - of over testing, i.e. running even more than an - exhaustive number of tests.

- -

Structured Testing

- -

Structured testing is a form of white box testing, where the tester - examines the code being tested and applies various techniques to it - to increase the efficiency of the testing.

- -

The Need for Structured Testing

- -

All types of black-box testing have a serious problem in that the search - space for failures grows exponentially as the number of inputs grows. You have - probably heard about this sort of thing before, but you might not appreciate - just how severe the situation is. To illustrate, we will consider the simplest of - programs, one that adds two numbers. When the RUT is a black box, the test routine - only has access to the interface, so it appears like this:

- -

-        int8 sum(int8 a, int8 b){
-        ...
-        }
-    
- -

Here, two int8 values are being added, so an input test vector will have - 16 bits. The result is also an int8, so an output vector will have 8 bits.

- -

As the internals of the RUT are unknown, it could contain unexpected logic, like this:

- -

-        int8 sum(int8 a, int8 b){
-        if(a == 248 && b == 224) return 5;
-        else return a + b;
-        }
-    
- -

A developer might not be writing malicious code when something like this - appears; instead, the code might have been pulled from somewhere else and - dropped in. There could have been a special case in this situation on another - machine. Perhaps the code was generated by an AI, or it could be leftover - debug information. This example illustrates that testers are typically not - responsible for understanding developer code. Though in this case the logic - is obvious, there can be more obscure functions that testers cannot take the - time to understand, which might exhibit similar unexpected behavior.

- -

As this is a black box, the numbers 248 and 224 are not known to the test writer. - Therefore, the only effective unstructured testing approach that is guaranteed to - find this failure is exhaustive testing.

- -

Exhaustive testing is feasible here. An input test vector with 16 bits will lead to - an input space of 65,536 points. Sixty-five thousand tests is trivial for a modern - desktop. The full test will take about 100 microseconds, and in this time the test - routine is guaranteed to find all failures. Note that in 50 microseconds, half of - the input space will be covered, so there is a 0.5 probability of finding a single - failure within that time. Generally, half the total time corresponds to a 0.5 probability - of finding a single failure.
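Such an exhaustive test routine can be sketched as follows, with the RUT left as a declaration; the reference model here is ordinary int addition truncated to 8 bits, assuming two's-complement wraparound is the intended behavior.

    #include <stdint.h>
    #include <stdio.h>

    int8_t sum(int8_t a, int8_t b);   /* the RUT, treated as a black box */

    int main(void){
      long failures = 0;
      for(int a = -128; a <= 127; a++){
        for(int b = -128; b <= 127; b++){
          int8_t observed  = sum((int8_t)a, (int8_t)b);
          int8_t reference = (int8_t)(a + b);  /* truncation assumed to wrap */
          if(observed != reference){
            failures++;
            printf("fail: sum(%d,%d) = %d, expected %d\n", a, b, observed, reference);
          }
        }
      }
      printf("%ld failures out of 65536 cases\n", failures);
      return failures != 0;
    }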

- -

Now, suppose that instead of looking for a reproducible fault, we have:

-

-      int8 sum(int8 a, int8 b){
-        if(a == 255 * rand() && b == 224 * rand()) return 5;
-        else return a + b;
-      }
-    
- -

In this case, to find the fault, the test routine must guess the values of two independent 8-bit random variables from a uniform distribution. As they are independent, we can combine them and note that the test must guess a 16-bit value. If we consider an "exhaustive" test, the tester will make 2^16 tries. Hence, the probability of finding this failure is:

- -

-        1 - (1 - 2^-16)^(2^16) = 0.6321...
-    
- -

A small adjustment to the above equation is necessary to make it precise, because sometimes 5 is the correct answer. Thus, with 2^16 test cases, there is certainty (a probability of 1.0) of finding all reproducible errors and about a 0.63 probability of finding a single random fault. The two probabilities are not as far apart as one might expect, given that the failure is "jumping around."

- -

Now, let's go back to the reproducible error case, but this time, suppose we are working - with an int16:

- -

-      int16 sum(int16 a, int16 b){
-        ...
-      }
-    
- -

Now an input vector has 32 bits, giving an input space of 4,294,967,296 points. Our computer will require about 33 seconds of compute time for this. Adding around 10 seconds for wall-clock overhead, let's call it 40 seconds. Testing would be barely practical if it took 40 seconds to test a RUT as simple as this, but perhaps we could invest in a faster computer?

- -

-      int32 sum(int32 a, int32 b){
-        ...
-      }
-    
- -

Now, suppose we are adding 32-bit numbers. The input space now has 18,446,744,073,709,551,616 points. - Compute time, without overhead, will be about 4,496 years! Suffice it to say, we have discovered that - testing the addition of two 32-bit numbers exhaustively is impractical. Even if we break the problem - into 1,000 pieces on different processors and use a state-of-the-art server farm, it would still take - months and cost a significant amount. What will you tell the boss?

- -

But wait! What if we move to 64-bit computing?

- -

-        int64 sum(int64 a, int64 b){
-        ...
-        }
-    
- -

The input space now has:

-

-        340,282,366,920,938,463,463,374,607,431,768,211,456
-    
-

points. That's about 340 undecillion. Compute time is 83 sextillion years—or about - 6 trillion times the age of the universe. Even with all the processing power on Earth, - even if you're willing to accept a probability of 0.1 of finding the failure, it would - take a thousand times longer than the age of the universe to test a function as simple - as adding two numbers. Clearly, there must be a better approach.

- - -

Summary Table

Bits      Input Space     Compute Time
8 bits    6.55 x 10^4     100 μs
16 bits   4.29 x 10^9     33 s
32 bits   1.84 x 10^19    4,496 years
64 bits   3.40 x 10^38    6 x 10^12 times the age of the universe
- -

A typical response from people when they see this is that they knew it went up fast, but did not know it went up this fast. It is also important to note that there is a one-to-one relationship between the percentage of time spent toward achieving exhaustive coverage and the percentage of coverage obtained: half the time, 50 percent coverage. In the last row of the table, to have reasonable test times, coverage would be on the order of 10^-18 percent. At that level of coverage there is really no reason to test. Hence, this table is not limited to speaking about exhaustive testing; rather, it speaks to black box testing in general.

- -

Informed Spot Checking

- -

In white box testing, we take the opposite approach to black box - testing. The test writer does look at the code implementation and - must understand how to read the code. Take our 64-bit adder example of - the prior section. Here in this section we will apply a white box - technique known as Informed Spot Checking.

- -

This is the prior example as a black box:

- -

-      int64 sum(int64 a, int64 b){
-        ...
-      }
-    
- -

And here it is as a white box:

- -

-      int64 sum(int64 a, int64 b){
-        if(a == 5717710 && b == 27) return 5;
-        else return a + b;
-      }
-    
- -

When following the approach of Informed Spot Checking, the tester examines - the code and sees there is a special case for a = 5717710 - and b = 27, which becomes the first test case. There’s also - a special case for when the sum exceeds the 64-bit integer range, both in - the positive and negative directions; these become two more test - cases. Finally, the tester includes a few additional cases that are not - edge cases.
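As a sketch, the resulting test routine might look like the following, with the sum routine left as a declaration; the overflow edge cases assume wraparound is the specified behavior, and the mid-range values are arbitrary.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdbool.h>

    int64_t sum(int64_t a, int64_t b);   /* the RUT from the listing above */

    bool spot_check_sum(void){
      struct { int64_t a, b, expected; } cases[] = {
        { 5717710, 27, 5717737 },              /* the special case seen in the code */
        { INT64_MAX, 1, INT64_MIN },           /* positive overflow edge            */
        { INT64_MIN, -1, INT64_MAX },          /* negative overflow edge            */
        { 0, 0, 0 },                           /* ordinary mid-range values         */
        { 123456789, -987654321, -864197532 },
      };
      for(size_t i = 0; i < sizeof cases / sizeof cases[0]; i++)
        if(sum(cases[i].a, cases[i].b) != cases[i].expected) return false;
      return true;
    }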

- -

Thus, by using white box testing instead of black box testing, the tester finds all - the failures with just 4 or so test cases instead of

-

-      340,282,366,920,938,463,463,374,607,431,768,211,456 
-     
-

cases. Quite a savings, eh?

- -

There are notorious edge cases in software, and these can often be seen by looking at the RUT. Zeros, and inputs that lead to index values just off the end of arrays, come to mind as common ones. Checking a middle value along with the edge cases is often an effective approach for finding failures.

- -

There is an underlying mechanism at play here. Note that it takes two points to determine a line. In Fourier analysis, it takes two samples per period of the highest frequency component to determine an entire waveform. Code also has patterns, patterns that are disjoint at edge cases. Hence, if a piece of code runs without failures for both the edge cases and spot-check values in between, it will often run without failures over an entire domain of values. This effect explains why ad hoc testing has led to so much relatively fail-free code.

- -

Informed Spot Checking is especially valuable in early development, as it - provides useful insights with minimal investment. In the early development - stage, making more investment in test code is unwise due to the code being - in flux. Test work is likely to get ripped up and replaced.

- -

The idea of test work being ripped up and replaced highlights a drawback - of white box testing. Analysis of code can become stale when implementations - are changed. However, due to the explosion in the size of the input space - with even a modest number of inputs, white box testing is necessary if there - is to be much commitment to producing reliable software or hardware.

- -

Refactoring the RUT

- -

Refactoring a RUT to make it more testable can be a powerful method for - turning testing problems that are exponentially hard due to state - variables, or very difficult to debug due to random variables, into - problems that are linearly hard. According to this method, the - tester is encouraged to examine the RUT to make the testing problem - easier.

- -

By refactoring the RUT, I mean that we rework the code to bring any random variables or state variables to the interface, where they are then treated as inputs and outputs.

- -

If placing state variables on the interface is adopted as a discipline by the developers, such refactoring will not be needed in the test phase; or, if it is needed, white box testers will see this, and it will be a bug that has been caught. Otherwise, refactoring leads to two versions of a routine: one that has been refactored and one that has not. The leverage gained on the testing problem by refactoring a routine typically more than outweighs the extra verification problem of comparing the before and after routines.

- -

As an example, consider our adder function with a random fault. As we - know from prior analysis, changing the fault to a random number makes - testing harder, but perhaps more importantly, it makes it nearly impossible - to debug, as the tester can not hand it to the developer and say, - 'it fails in this case'.

-

-      int64 sum(int64 a, int64 b){
-        if( a == (5717710 * rand()) && b == (27 * rand()) ) return 5;
-        else return a + b;
-      }
-    
- -

The tester refactors this function as:

-

-      int64 sum( int64 a, int64 b, int64 a0 = 5717710*rand(), int64 b0 = 27*rand() ){
-        if( a == a0 && b == b0 ) return 5;
-        else return a + b;
-      }
-    
- -

Here a0 and b0 are added to the interface as optional arguments. During testing their values will be supplied; during production the defaults will be used. Thus, we have broken the one test problem into two: the question of whether sum works, and the question of whether the random number generation works.

- -

Failures in sum found during testing are now reproducible. If the tester employs informed spot checking, the failure will be found with few tests, and the point in the input space where the failure occurs can be reported to development and used for debugging.
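Since C has no default arguments, the following sketch of the refactored routine and its test passes the formerly hidden values explicitly; in production a thin wrapper would supply the random defaults.

    #include <stdint.h>
    #include <stdbool.h>

    /* Refactored RUT: the formerly hidden random values are now parameters. */
    int64_t sum_refactored(int64_t a, int64_t b, int64_t a0, int64_t b0){
      if(a == a0 && b == b0) return 5;
      return a + b;
    }

    /* With a0 and b0 under test control, the failure is reproducible:
       this test case fails on every run, and the failing point can be
       reported to development as-is. */
    bool test_sum_refactored(void){
      int64_t a0 = 40, b0 = 2;                     /* values chosen by the test */
      return sum_refactored(40, 2, a0, b0) == 42;  /* returns 5, so this fails  */
    }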

- -

Here is a function that keeps a state variable between calls.

-

-    int state = 0;
-    int call_count = 0; 
-    void state_machine(int input) {
-        int choice = (input >> call_count) & 1; 
-        switch (state) {
-            case 0:
-                printf("State 0: Initializing...\n");
-                state = choice ? 0 : 1;
-                break;
-            case 1:
-                printf("State 1: Processing Path A...\n");
-                state = choice ? 0 : 2; 
-                break;
-            case 2:
-                printf("State 2: Processing Path B...\n");
-                state = choice ? 0 : 3;
-                break;
-        }
-        call_count++;
-    }
-    
- -

The Mosaic Testbench makes standard out available to the test routine in an array, so we can capture and examine the printed value while testing this RUT. Because of the state variables, state and call_count, this routine will behave differently each time it is called. A black box test will have a large number of input vector sequences to try. The failure occurs in the call after being in state 2, when the count is such that the choice is to go to state 3.

- -

-    int state = 0;
-    int call_count = 0; 
-    void state_machine(int input ,int state0 = state ,int call_count0 = call_count) {
-        int choice = (input >> call_count0) & 1; 
-        switch (state0) {
-            case 0:
-                printf("State 0: Initializing...\n");
-                state = choice ? 0 : 1;
-                break;
-            case 1:
-                printf("State 1: Processing Path A...\n");
-                state = choice ? 0 : 2; 
-                break;
-            case 2:
-                printf("State 2: Processing Path B...\n");
-                state = choice ? 0 : 3;
-                break;
-        }
-        call_count = call_count0 + 1;
-    }
-    
- -

Here the test routine supplies state0 and call_count0 as inputs. The test routine treats state and call_count as outputs, so this is then a pure function. As a pure function it is a much easier testing problem. Now, instead of a combinatorially hard problem involving input sequences, the test routine can visit each of the three states and set the input such that each visits its two possible next states. That is six test cases to see everything that this function is capable of doing, as sketched below.
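A sketch of those six test cases in C follows. C has no default arguments, so state0 and call_count0 are ordinary parameters here, and the globals serve as the observable outputs. The expected value for the last case simply records what the listing does; the tester would replace it with whatever the specification requires.

    #include <stdbool.h>
    #include <stdio.h>

    int state = 0;
    int call_count = 0;

    /* Refactored RUT: state and call_count supplied as explicit inputs,
       with their new values observable through the globals (the outputs). */
    void state_machine(int input, int state0, int call_count0){
      int choice = (input >> call_count0) & 1;
      switch(state0){
        case 0: printf("State 0: Initializing...\n");      state = choice ? 0 : 1; break;
        case 1: printf("State 1: Processing Path A...\n"); state = choice ? 0 : 2; break;
        case 2: printf("State 2: Processing Path B...\n"); state = choice ? 0 : 3; break;
      }
      call_count = call_count0 + 1;
    }

    /* The six test cases: each of the three states, each of the two choices.
       With call_count0 fixed at 0, the low bit of the input is the choice. */
    bool test_state_machine(void){
      struct { int state0, input, expected_next; } cases[] = {
        {0, 1, 0}, {0, 0, 1},
        {1, 1, 0}, {1, 0, 2},
        {2, 1, 0}, {2, 0, 3},   /* whether 3 is correct is a question for the spec */
      };
      for(int i = 0; i < 6; i++){
        state_machine(cases[i].input, cases[i].state0, 0);
        if(state != cases[i].expected_next) return false;
      }
      return true;
    }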

- -

Any time the RUT is refactored in the testing phase, it raises the question of whether the refactored code maintains the required functionality. This becomes another verification problem, which might or might not be addressed through testing. One way to manage this issue is to take the refactored code back to the developers and have them adopt it into the project. Then it becomes the original code.

- -

Bottom Up Testing

- -

When a function corresponds directly to CPU instructions, as is the case for the + operator, we typically trust that it will give the right answer. The same can be said for the call and return dynamic. Unless we are working on a new compiler, it is typically assumed that this works; application program test suites do not include tests for whether calls work.

- -

The reason for this trust is that CPU instructions, and function calls - are already extensively tested, both directly by the manufacturers, and - through widespread use. Though this trust is not always warranted as in - the case of the Intel Pentium divider, which had failure cases.

- -

We can decompose a testing problem into trusted and untrusted components. - We call routines that are trusted building blocks, - then we use the building blocks to build up larger routines, and then - test those to create larger building blocks. At the end we will have - built up a trustworthy program.

- -

This approach parallels what developers do when they write programs. They - start with primitive programs that come with the language or from - libraries, and then they compose these to write custom functions.

- -

The following is an expansion of our adder example: creating and testing an adder for 256 bit numbers. For purposes of presentation, we will refer to int256 as a type that corresponds to an array of 32 bytes, and uint1 as a 1 bit unsigned integer, i.e. 0 or 1.

- -

-    {uint1, uint64} full_adder(uint64 a, uint64 b, uint1 c0) {
-        uint64 partial_sum = a + b;
-        uint64 sum = partial_sum + c0;
-        uint1 carry_out = (partial_sum < a) || (sum < partial_sum);
-        return {carry_out, sum};
-    }
-    
- -

Here is a 256 bit adder made from 64 bit adders.

- -

-    {uint1, int256} add_256(int256 a, int256 b) {
-        uint1 carry_in = 0;
-        int64 sum_parts[4];  // Array to store each 64-bit segment of the sum
-
-        for i = 0 to 3 {
-            // Get the i-th 64-bit segments of a and b
-            int64 a_part = (a >> (i * 64)) & 0xFFFFFFFFFFFFFFFF;
-            int64 b_part = (b >> (i * 64)) & 0xFFFFFFFFFFFFFFFF;
-
-            // Perform the full addition on each 64-bit part
-            {carry_out, sum_parts[i]} = full_adder(a_part, b_part, carry_in);
-
-            // Update carry-in for the next 64-bit segment
-            carry_in = carry_out;
-        }
-
-        int256 sum = 0;
-        for i = 0 to 3 {
-            sum |= (sum_parts[i] << (i * 64));
-        }
-
-        return {carry_in, sum};
-    }
-    
- -

According to the bottom up technique, we first test - the full_adder, which is not a difficult testing problem. It - employs well known trusted operations, and has a couple of interesting - special case conditions. Given the numeric nature of this code, these - special case conditions are probably better verified by proof than by - testing, but they can be tested.
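A sketch of a block test for full_adder follows, with the routine left as a declaration and a plain C struct standing in for the {uint1, uint64} return pair.

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct { uint8_t carry_out; uint64_t sum; } adder_result;

    adder_result full_adder(uint64_t a, uint64_t b, uint8_t c0);  /* the RUT above */

    /* Edge cases: a carry produced by a + b, a carry produced only by the
       carry-in, both at once, and a couple of ordinary values. */
    bool test_full_adder(void){
      struct { uint64_t a, b; uint8_t c0, carry; uint64_t sum; } cases[] = {
        { UINT64_MAX, 1,          0, 1, 0 },
        { UINT64_MAX, 0,          1, 1, 0 },
        { UINT64_MAX, UINT64_MAX, 1, 1, UINT64_MAX },
        { 1,          2,          0, 0, 3 },
        { 0,          0,          1, 0, 1 },
      };
      for(int i = 0; i < 5; i++){
        adder_result r = full_adder(cases[i].a, cases[i].b, cases[i].c0);
        if(r.carry_out != cases[i].carry || r.sum != cases[i].sum) return false;
      }
      return true;
    }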

- -

Once the full_adder can be trusted, testing add_256 - reduces to checking that the various 64 bit parts are extracted and then - packed correctly, - and are not, say, offset by one, and that the carries are properly communicated - during the add.
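As a sketch of both checks, with int256 stood in by an array of four 64-bit limbs (least significant first) and add_256 left as a declaration:

    #include <stdint.h>
    #include <stdbool.h>
    #include <string.h>

    typedef struct { uint64_t limb[4]; } int256;                 /* stand-in type */
    typedef struct { uint8_t carry_out; int256 sum; } add_result;

    add_result add_256(int256 a, int256 b);                      /* the RUT above */

    bool test_add_256(void){
      /* Packing check: adding zero must return each limb unchanged and in place. */
      int256 pattern = {{ 0x1111111111111111, 0x2222222222222222,
                          0x3333333333333333, 0x4444444444444444 }};
      int256 zero    = {{ 0, 0, 0, 0 }};
      add_result r = add_256(pattern, zero);
      if(r.carry_out != 0 || memcmp(&r.sum, &pattern, sizeof pattern) != 0) return false;

      /* Carry check: all ones plus one must ripple a carry through every limb. */
      int256 all_ones = {{ UINT64_MAX, UINT64_MAX, UINT64_MAX, UINT64_MAX }};
      int256 one      = {{ 1, 0, 0, 0 }};
      r = add_256(all_ones, one);
      if(r.carry_out != 1 || memcmp(&r.sum, &zero, sizeof zero) != 0) return false;

      return true;
    }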

- -

Note this test also trusts the fact that ripple carry addition is a valid - algorithm for assembling the pieces. Thus there is a new verification - problem, that for the algorithm. In this case, ripple carry addition is - already a trusted algorithm.

- -

Testing of add_256 could be further simplified with refactoring, by moving the loop control variables and the carry_in and carry_out values to the interface. As i is recycled, it would become two variables, say i and j. Once the loop control variables are on the interface, it is straightforward to test the packing. Once the carries are on the interface, it is straightforward to test the carries.

- -

In general all programs and circuits can be conceptualized as functional - units, channels, and protocols. A test that shows that these work as specified, - shifts the test problem from the RUT to the specification.

- -

Adding to the code

- -

It is a common practice to add property checks to the code for gathering - data about failures or other potential problems. These will then write to - log files, or even send messages back to the code maintainers. By doing - this the testers benefit from the actual use of the product as though it - were a test run. When failures are found, such code might then trigger - remedial or recovery actions.

- -

About Reference Outputs and Reference Properties

- -

When testing during development, reference outputs often come from the - developers or testers themselves. They know what they expect from the - routines, but they do not know if the code will meet these expectations, - so they write tests. Typically, they try to imagine the hardest possible - cases. However, sometimes a young developer avoids testing challenging - cases to sidestep the risk of failures—this is, of course, a poor approach - that can lead to undetected issues.

- -

Often, specification authors provide reference outputs or extensive test - suites that must be passed to achieve certification. Architects also - contribute by creating multi-level specifications—for the entire program, - for the largest components, and for communication protocols between - components. These specifications often serve as high-quality reference - outputs and property checks that can be applied to the model during testing. - The goal of developers and testers is to meet these specifications, making - failures directly relevant to the development process and program design.

- -

Experts in a specific area sometimes provide test data, maintaining - a database of reference data as a resource for validating outputs. - For some types of code, experts also supply property checks, which - evaluate whether outputs satisfy essential properties rather than specific - values. Depending on the domain, these properties can be an important aspect - of the testing process.

- -

Each time a bug is found, a test should be created to capture a failure - related to that bug. Ideally, such tests are written with minimal - implementation-specific details so they remain relevant even after code - changes. These tests are then added to a regression testing suite, ensuring - that future changes do not reintroduce the same issues.

- -

For applications involving multi-precision arithmetic, such as the earlier adder example, reference data is often sourced from another established multi-precision library, whether an open-source or commercial product. The assumption is that an existing product will be more reliable than a newly developed one, and since it is implemented differently, its errors are likely to be uncorrelated. This is competitive testing, an aspect of compatibility testing, here being used for other objectives. In the limit, as the RUT matures, this approach will tend to identify bugs in the reference data from the other product as often as it does in the RUT, which can be an interesting effect.

- -

In some cases, reference data comes from historical sources or existing - systems. When upgrading or replacing a legacy system, historical data - serves as a benchmark for comparison. Similarly, industry standards - and compliance datasets, particularly from regulatory organizations - like IEEE, NIST, or ISO, provide reliable reference points for applications - requiring standardized outputs. Compliance-driven tests are often required - for certification or regulatory approval in fields such as finance, - healthcare, and aerospace.

- -

For cases requiring many inputs without needing specific reference values, random number generators can provide extensive test data. Examples include comparative testing and property checking. Random number generators can also be configured to concentrate cases in specific areas of the input domain that, for some reason, concern the testers.
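A sketch of such a biased generator, using the C library's rand(); the window size and the fifty-fifty split are arbitrary choices, and seeding with srand keeps a run reproducible.

    #include <stdint.h>
    #include <stdlib.h>

    /* Random 64-bit test input, biased so that half the draws land within a
       small window around a boundary value the testers are concerned about. */
    uint64_t next_input(uint64_t boundary_of_concern){
      if(rand() % 2 == 0)
        return boundary_of_concern - 8 + (uint64_t)(rand() % 17);
      uint64_t wide = 0;                 /* otherwise spread over the full range */
      for(int i = 0; i < 4; i++)
        wide = (wide << 16) | (uint64_t)(rand() & 0xFFFF);
      return wide;
    }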

- -

Customer and user feedback sometimes uncovers additional test cases, - especially when dealing with complex or evolving software. Feedback - reveals edge cases or expected behaviors that developers and testers - may not have anticipated, allowing teams to create reference points - for new test cases that cover real-world use cases and address user needs.

- -

Conclusion

- -

If you are a typical tester or developer reading through the previous list, - you might feel a bit disappointed. Unless you work in a specialized area, - are attempting to create a compatible product, or need to exercise the hardware, much - of that list might seem inapplicable. For many developers, the most - applicable advice remains: "During development, reference outputs often - come from the developers or testers themselves." I apologize if this seems - limiting, but consider this: the reason we run programs is to generate the - very data we're looking for. If that data were easily available, we wouldn’t - need the program.

- -

In many ways, testing is about making developers and testers the first - users of the product. All products will have bugs; it’s far better for - experts to encounter these issues first.

- -

Testing also facilitates communication among project members. Are the - architects, developers, and testers all on the same page about how the - product should work? The only way to find out is to run what has been built - and observe it in action. For this, we need test cases.

- -

This circular problem—finding data that our program should generate - to test - the program itself — illustrates a fundamental limitation in software testing. - We encountered this in the discussion on unstructured, black-box testing: as - soon as we open the box to inspect the code, we are no longer just testing it, - but reasoning about it and even verifying it formally.

- -

This, perhaps, hints at a way forward. Our program is a restatement of the - specification in another language. Verification, then, is an equivalence - check. We can run examples to demonstrate equivalence, but black-box testing - alone will have limited impact. Alternatively, we can examine our code and - try to prove that it matches the specification. Though challenging, this - approach is far more feasible than waiting ten times the age of the universe - to confirm our solution through black box testing.

- -

Think of testing as a reasoning problem. Explain why the routine works and - how it contributes to meeting the specification. Work from the top down: if - the high-level components behave correctly, the program will meet the - specification. That’s the first step. Then explain why the breakdown of - those top-level components ensures correct behavior. Continue this process, - and then use tests to validate each link in this chain of reasoning. In this - way, you can generate meaningful reference values.

- -
- - - - diff --git a/document/license.txt b/document/license.txt deleted file mode 100644 index e177f6f..0000000 --- a/document/license.txt +++ /dev/null @@ -1,152 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that -entity. For the purposes of this definition, "control" means (i) the power, -direct or indirect, to cause the direction or management of such entity, whether -by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of -the outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. 
Subject to the terms and conditions of this -License, each Contributor hereby grants to You a perpetual, worldwide, -non-exclusive, no-charge, royalty-free, irrevocable copyright license to -reproduce, prepare Derivative Works of, publicly display, publicly perform, -sublicense, and distribute the Work and such Derivative Works in Source or -Object form. - -3. Grant of Patent License. Subject to the terms and conditions of this License, -each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) patent -license to make, have made, use, offer to sell, sell, import, and otherwise -transfer the Work, where such license applies only to those patent claims -licensable by such Contributor that are necessarily infringed by their -Contribution(s) alone or by combination of their Contribution(s) with the Work -to which such Contribution(s) was submitted. If You institute patent litigation -against any entity (including a cross-claim or counterclaim in a lawsuit) -alleging that the Work or a Contribution incorporated within the Work -constitutes direct or contributory patent infringement, then any patent licenses -granted to You under this License for that Work shall terminate as of the date -such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the Work or -Derivative Works thereof in any medium, with or without modifications, and in -Source or Object form, provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and You must cause any modified files to carry prominent notices -stating that You changed the files; and You must retain, in the Source form of -any Derivative Works that You distribute, all copyright, patent, trademark, and -attribution notices from the Source form of the Work, excluding those notices -that do not pertain to any part of the Derivative Works; and If the Work -includes a "NOTICE" text file as part of its distribution, then any Derivative -Works that You distribute must include a readable copy of the attribution -notices contained within such NOTICE file, excluding those notices that do not -pertain to any part of the Derivative Works, in at least one of the following -places: within a NOTICE text file distributed as part of the Derivative Works; -within the Source form or documentation, if provided along with the Derivative -Works; or, within a display generated by the Derivative Works, if and wherever -such third-party notices normally appear. The contents of the NOTICE file are -for informational purposes only and do not modify the License. You may add Your -own attribution notices within Derivative Works that You distribute, alongside -or as an addendum to the NOTICE text from the Work, provided that such -additional attribution notices cannot be construed as modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. 
Unless You explicitly state otherwise, any -Contribution intentionally submitted for inclusion in the Work by You to the -Licensor shall be under the terms and conditions of this License, without any -additional terms or conditions. Notwithstanding the above, nothing herein shall -supersede or modify the terms of any separate license agreement you may have -executed with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade names, -trademarks, service marks, or product names of the Licensor, except as required -for reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or agreed to in -writing, Licensor provides the Work (and each Contributor provides its -Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied, including, without limitation, any warranties -or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A -PARTICULAR PURPOSE. You are solely responsible for determining the -appropriateness of using or redistributing the Work and assume any risks -associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, whether in -tort (including negligence), contract, or otherwise, unless required by -applicable law (such as deliberate and grossly negligent acts) or agreed to in -writing, shall any Contributor be liable to You for damages, including any -direct, indirect, special, incidental, or consequential damages of any character -arising as a result of this License or out of the use or inability to use the -Work (including but not limited to damages for loss of goodwill, work stoppage, -computer failure or malfunction, or any and all other commercial damages or -losses), even if such Contributor has been advised of the possibility of such -damages. - -9. Accepting Warranty or Additional Liability. While redistributing the Work or -Derivative Works thereof, You may choose to offer, and charge a fee for, -acceptance of support, warranty, indemnity, or other liability obligations -and/or rights consistent with this License. However, in accepting such -obligations, You may act only on Your own behalf and on Your sole -responsibility, not on behalf of any other Contributor, and only if You agree to -indemnify, defend, and hold each Contributor harmless for any liability incurred -by, or claims asserted against, such Contributor by reason of your accepting any -such warranty or additional liability. - -END OF TERMS AND CONDITIONS diff --git a/document/readme.txt b/document/readme.txt deleted file mode 100644 index 4a42a93..0000000 --- a/document/readme.txt +++ /dev/null @@ -1,10 +0,0 @@ - --------------------------------------------------------------------------------- -Mosaic - -A tool to assist in hierarchical white box testing. - -Each piece of a program must have integrity for the complete picture to emerge. - -With Mosaic we test the pieces, then the communication between the pieces. - diff --git a/document/todo.txt b/document/todo.txt deleted file mode 100644 index 06f2ba3..0000000 --- a/document/todo.txt +++ /dev/null @@ -1,27 +0,0 @@ - -1. More languages support, notably nodejs. - -2. This message: - - Running Test_File_0...Structural problem: unpack_file_path_0 does not accept a single IO argument. - Error: unpack_file_path_0 has an invalid structure. 
- - Perhaps only the second one, getting rid of 'Error:' - - "Bad type signature for method: unpack_file_path_0 does not accept a single IO argument." - -3. TestBench -> Testbench perhaps? - -4. fix emacs.el so that jdbx doesn't always start with Test_Util. (note the - prior start can be found with M-x p - - -5. should include a tool for aggregating test suite runs - FYI, I'm using - -6. need an example .bashrc for setting the prompt now that I removed references -to the 'resource' project and plan to deprecate it. - -7. should check stdin for unused input and report error also. - -8. move 'unlock_class` from Ariadne tests to Mosaic_Util. diff --git "a/document\360\237\226\211/An_Introduction_to_Structured_Testing.html" "b/document\360\237\226\211/An_Introduction_to_Structured_Testing.html" new file mode 100644 index 0000000..384beb2 --- /dev/null +++ "b/document\360\237\226\211/An_Introduction_to_Structured_Testing.html" @@ -0,0 +1,1034 @@ + + + + + + + White Box Testing - Mosaic Project + + + +
+
+

An Introduction to Structured Testing

+

© 2024 Thomas Walker Lynch - All Rights Reserved.

+
+ + +

Introduction

+ +

This guide provides a general overview of testing concepts. It is + not a reference manual for the Mosaic Testbench itself. At the + time of writing, no such reference document exists, so developers and + testers are advised to consult the source code directly for implementation + details. A small example can be found in the Test_MockClass + file within the tester directory. Other examples can be found in projects + that make use of Mosaic.

+ +

A typical testing setup comprises three main components: + the Testbench, the test + routines, and a collection of units under + test (UUTs). Here, a UUT is any individual software or hardware + component intended for testing. Because this guide focuses on software, we + use the term RUT (routine under test) to denote + the unit under test in software contexts. Although we use software-centric + terminology, the principles outlined here apply equally to hardware + testing.

+ +

Each test routine supplies inputs to a RUT, collects the resulting + outputs, and determines whether the test passes or fails based on those + values. A given test routine might repeat this procedure for any number + of test cases. The final result from the test + routine is then relayed to the Testbench. Testers and developers write + the test routines and place them into the Testbench.

+ +

Mosaic is a Testbench. It serves as a structured environment for + organizing and executing test routines, and it provides a library of utility + routines for assisting the test writer. When run, the Testbench sequences + through the set of test routines, one by one, providing each test routine + with an interface to control and examine standard input and output. Each + test routine, depending on its design, might in turn sequence through + test cases. During execution, the test + bench records pass/fail results, lists the names of the test routines that failed, + and generates a summary report with pass/fail totals.

+ +

At the time of this writing, Mosaic does not provide features for + breaking up large test runs into parallel pieces and then load balancing + those pieces. Perhaps such a feature will be developed for a future version. + However, this does not prevent an enterprising tester from running multiple + Mosaic runs with different test routines in parallel in an ad hoc manner, or + with other tools.

+ +

Function versus Routine

+ +

A routine is an encapsulated sequence of instructions, with a symbol + table for local variables, and an interface for importing and exporting + data through the encapsulation boundary. This interface + maps arguments from a caller + to parameters within the routine, enabling data + transfer at runtime. In the context of testing, the arguments that bring + data into the routine are referred to as + inputs, while those that carry data out are called + outputs. Notably, in programming, outputs are often called + return values.

+ +

In computer science, a pure function is a routine + in which outputs depend solely on the provided inputs, without reference to + any internal state or memory that would persist across calls. A pure function + produces the same output given the same inputs every time it is called. + Side effects, such as changes to external states or reliance on external + resources, are not present in pure functions; any necessary interactions + with external data must be represented explicitly as inputs or outputs. + By definition, a function produces a single output, though this output can + be a collection, such as a vector or set.
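As a small illustration, here is the distinction in Java (the Mosaic project's implementation language); the routine names are invented for the example, a sketch only:

    class PurityExample{
      // Pure: the output depends only on the inputs; add(2, 3) is 5 on every call.
      static long add(long a, long b){ return a + b; }

      // Not pure: a hidden counter persists across calls, so the same inputs
      // can produce different outputs depending on the call history.
      static long callCount = 0;
      static long addAndStamp(long a, long b){
        callCount = callCount + 1;
        return a + b + (callCount % 2);
      }
    }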

+ +

Routines with internal state variables that facilitate temporal behavior + can produce outputs that depend on the sequence and values of prior + inputs. This characteristic makes such routines challenging to + test. Generally, better testing results are achieved when testing pure + functions, where outputs depend only on current inputs.

+ + +

Block and Integration

+ +

A test routine provides inputs to a RUT and collects its outputs, often + doing so repeatedly in a sequence of test cases. The test routine then + evaluates these values to determine if the test has passed or failed.

+ +

When a test routine evaluates a RUT that corresponds to a single function + or module within the program, it performs a block + test.

+ +

When a test routine evaluates a RUT that encompasses multiple program + components working together, it is conducting + an integration test.

+ +

Integration tests typically involve combining substantial components of a program that were developed independently. Such tests can occur later in the project timeline, where they can reveal complex and unforeseen interactions between components when there is not adequate time to deal with them. To help address these challenges, some software development methodologies recommend instead introducing simplified versions of large components early in the development process and then refining them over time.

+ +

Failures and Faults

+ +

A test routine has two primary responsibilities: first, supplying inputs to and collecting outputs from the RUT; and second, determining whether the RUT passed or failed the test. This second responsibility is handled by the failure decider. When the failure decider is not an explicit function in the test routine, its functionality will still be present in the test routine's logic.

+ +

A given failure decider might produce false positive or false negative results. A false positive occurs when the failure decider indicates that a test has passed when it should have failed; hence, this is also known as a false pass. Conversely, a false negative occurs when the decider indicates failure when the test should have passed; hence, this is also known as a false fail. An ideal failure decider would produce neither false passes nor false fails.

+ +

In a typical testing workflow, passing tests receive no further + scrutiny. In contrast, failed tests are further examined to locate the + underlying fault. Thus, for such a workflow, false fails are likely to be + caught in the debugger, while false passes might go undetected until + release, then be discovered by users. Early in the project timeline, this + effect can be mitigated by giving passing cases more scrutiny, essentially + spot-checking the test environment. Later, in regression testing, the volume + of passing cases causes spot-checking to be ineffective. Alternative + strategies include redundant testing, better design of the failure decider, + or employing other verification techniques.

+ +

A failure occurs when there is a deviation between the observed output from a RUT and the ideal output. When the ideal output is unavailable, a reference output is often used in its place. When using reference outputs, the accuracy of test results depends on both the accuracy of the failure decider and the accuracy of the reference outputs themselves.

+ +

Some testers will refer to an observed output as an actual output. Additionally, some testers will call reference outputs golden values, particularly when those values are considered highly accurate. However, the terminology introduced earlier aligns more closely with that used in scientific experiments, which is fitting since testing is a form of experimentation.

+ +

A fault is a flaw in the design, implementation, or realization of a + product that, if fixed, would eliminate the potential for a failure to be + observed. Faults are often localized to a specific point, but they can also + result from the mishandling of a confluence of events that arise during + product operation.

+ +

The goal of testing is to create conditions that make failures observable. Once a failure is observed, it is the responsibility of developers, or testers in a development role, to debug these failures, locate the faults, and implement fixes.

+ +

Root cause analysis extends beyond the scope of development and test. It + involves examining project workflows to understand why a fault exists in the + product. Typically, root cause analysis will identify a root cause that, if + "fixed," would not eliminate the potential for a failure to be observed in + the current or near-term releases. Consequently, root cause analysis is + generally not a priority for design and testing but instead falls within the + domain of project management.

+ +

A technique commonly used to increase the variety of conditions—and thus the likelihood of creating conditions that reveal faults—is to run more tests with different inputs. This is called increasing the test coverage.

+ +

The Mosaic tool assists testers in finding failures, but it does not directly help with identifying the underlying fault that led to the failure. Mosaic is a tool for testers. However, these two tasks—finding failures and locating faults—are not entirely separate. Knowing where a failure occurs can provide the developer with a good starting point for locating the fault and help narrow down possible causes. Additionally, once a developer claims to have fixed a fault, that claim can be verified through further testing.

+ +

Testing Objectives

+ +
    +
  • + Verification Testing
    + Purpose: To confirm that the software or system meets the specified requirements and design. Verification testing ensures that each component behaves as expected according to specifications, often conducted throughout development to catch any deviations from the original plan. +
  • + +
  • + Regression Testing
    + Purpose: To ensure that recent changes or additions to the codebase have not introduced new errors. This type of testing checks that previously tested functionalities still work as intended, making it essential for maintaining stability as updates are made. +
  • + +
  • + Development Testing
    + Purpose: To evaluate code correctness and functionality during the development process. Development testing is often exploratory, allowing developers to check whether their code performs as expected before formal testing. It can include unit testing, integration testing, and other quick checks to validate functionality on the fly. +
  • + +
  • + Exploratory Testing
    + Purpose: To uncover unexpected issues by testing the software in an unscripted manner. Exploratory testing allows testers to investigate the software's behavior outside of planned test cases, often discovering edge cases or flaws that structured tests may miss. +
  • + +
  • + Performance Testing
    + Purpose: To assess how the software performs under expected and extreme conditions. Performance testing evaluates response times, resource usage, and stability, often covering areas like load, stress, and scalability testing. This objective ensures the system can handle the demands it will face in production. +
  • + +
  • + Compliance Testing
    + Purpose: To confirm that the software adheres to regulatory, legal, and industry standards. Compliance testing ensures that the system meets external requirements, which may include accessibility, data privacy, and industry-specific standards. +
  • + +
  • + Security Testing
    + Purpose: To identify vulnerabilities and ensure the software is protected against unauthorized access and threats. Security testing checks for risks like data breaches, weak authentication, and exposure to known vulnerabilities, helping to safeguard sensitive information and user privacy. +
  • + +
  • + Compatibility Testing
    + Purpose: To verify that the software works across different environments, devices, and platforms. Compatibility testing ensures consistent functionality and appearance across browsers, operating systems, hardware configurations, and other setups. +
  • + +
  • + Acceptance Testing
    + Purpose: To determine if the software meets the end user's needs and expectations. Acceptance testing, often conducted by stakeholders or QA teams, validates that the software is usable and functional from a real-world perspective, acting as the final check before release. +
  • + +
  • + Documentation Testing
    + Purpose: To ensure that all documentation, guides, and user manuals are accurate and reflect the current software functionality. Documentation testing verifies that users have clear, up-to-date information for effective usage and troubleshooting. +
  • + +
  • + Usability Testing
    + Purpose: To confirm that the software is user-friendly and intuitive. Usability testing focuses on the ease of use, ensuring that end users can navigate and interact with the software without unnecessary friction, leading to a positive user experience. +
  • + +
+ +

The Mosaic Testbench is useful for any type of testing that can be formulated as test routines testing RUTs. This certainly includes verification, regression, development, and exploratory testing. It also covers the portions of performance, compliance, security, compatibility, and acceptance testing that fit the model of test routines and RUTs. Only recently has it become imaginable that the Mosaic Testbench could be used for documentation testing; it is now possible to fit an AI API into a test routine and turn a document into a RUT. Usability testing often depends on other types of tests, so to that extent the Mosaic Testbench can play a role. However, usability is also in part feedback from users, so short of putting users in the Matrix, that portion of usability testing remains outside the domain of the Mosaic Testbench, though come to think of it, the Testbench could be used to reduce surveys to pass/fails.

+ +

Each test objective will lead to writing tests of a different nature.

+ + +

Unstructured Testing

+ +

This section outlines some common approaches + to unstructured testing, often referred to + as black box testing. Black boxes are inherent + in even the most structured testing approaches, as at the lowest levels of + analysis, elements will always remain opaque. Even in the most highly + detailed test of logic possible, one that examines a RUT down to the + individual logic gates, each gate would be treated as a black box.

+ +

Reference Output Based Testing

+ +

In reference output based testing, an ordering + is assigned to the inputs for + the routine under test, as well as to + its outputs. Through this ordering the inputs + and outputs become vectors. Thus the routine under test is given + an input vector and it returns + an observed output vector.

+ +

A Reference Model is then + given the same input vector, and then it + produces a reference output vector. The reference + output vector has the same component ordering as the + observed output vector. + +

The failure detection function then compares + each observed output vector with its corresponding reference output vector. If + they do not match, the test is deemed to have failed.

+ +

It follows that in reference output based testing, the accuracy of the + test results depends solely on the accuracy of the Reference Model.
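A minimal sketch of this arrangement in Java; rutSum stands for the routine under test and refSum for an independently implemented Reference Model, both names invented for the example:

    class ReferenceOutputTest{
      static long rutSum(long a, long b){ return a + b; }   // the routine under test
      static long refSum(long a, long b){ return b + a; }   // stands in for an independently built Reference Model

      // The failure decider: compare the observed output vector with the reference output vector.
      static boolean testCase(long a, long b){
        long observed  = rutSum(a, b);
        long reference = refSum(a, b);
        return observed == reference;   // false means this test case failed
      }

      public static void main(String[] args){
        System.out.println(testCase(2, 3) ? "pass" : "fail");
      }
    }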

+ +

When the implementation of the Reference Model is unrelated to the routine under test, we tend to expect that the errors produced by the Reference Model will be uncorrelated with those produced by the routine under test, and thus unlikely to coincide. This property biases test routines towards delivering false fails. As noted earlier, false fails are likely to be caught as test fails are followed up with further scrutiny. It follows that reference output based testing can deliver a high degree of accuracy even when the reference model is not ideal.

+ +

Property Check Testing

+ +

Property Check Testing is an alternative to + reference output based testing. Here, rather than comparing each observed + output to a reference output, the observed output is validated against + known properties or expected characteristics.

+ +

For example, given an integer as input, a function that correctly squares + this input will preserve the parity of the input, as an odd number squared + will be odd, and an even number squared will be even. The failure decider + can check this property for each test case, and if it does not hold, the + test case fails.
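A sketch of that parity check in Java; square here is an invented stand-in for the RUT:

    class ParityPropertyCheck{
      static long square(long x){ return x * x; }   // the RUT

      // Property: squaring preserves parity, so the low bits of x and square(x) agree.
      static boolean parityPreserved(long x){
        return ((x ^ square(x)) & 1L) == 0;
      }

      public static void main(String[] args){
        for(long x = -4; x <= 4; x++){
          System.out.println(x + ": " + (parityPreserved(x) ? "pass" : "fail"));
        }
      }
    }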

+ +

Note for the square RUT test, this proposed property check is weak. Given + a uniform distribution, half the time an errant square will still have the + correct parity. There are stronger property checks that could be done for + squares, but the point here is one of illustration. A weak property check + would not recognize many failures, and thus be biased towards false pass + decisions. Those are the bad ones, as passing tests typically receive no + further scrutiny.

+ +

Spot Checking

+ +

In spot checking, the function under test is checked against one or two + input vectors. When using a black box approach, these are chosen at + random.

+ +

Moving from zero to one is an infinite relative change; i.e., running a program for the first time requires that many moving parts work together, parts that have never been tried before. Hence, a tremendous amount is learned about the logic and setup when the first test runs. Such a first test is called a smoke test, a term that has literal meaning in the field of electronics testing.

+ +

Exhaustive Testing

+ +

A test routine will potentially run multiple test cases against a given + RUT. If the RUT is a pure function, then per test case, a single test + vector will be given to the RUT, and a single output vector will be + returned. However, if the RUT is sequential in nature, for each test case + there will be a sequence of input vectors, and potentially a sequence of + output vectors.

+ +

The set of possible inputs for a RUT, whose members are either individual vectors or vector sequences, constitutes the input space. Test coverage is typically given as the proportion of inputs tested to the total in the input space, reported as a percentage.

When the RUT is a pure function, the input space is an enumeration of all possible input vectors. If the inputs include arbitrarily long strings, then it will not be possible to complete such an enumeration; the best that can be done is to generate more and more inputs on demand.

+ +

When the RUT has sequential behavior, achieving full coverage requires + giving the RUT every possible starting input, and then sequencing it to a + point of hitting a stop state or cycle state in every possible way. Again + if inputs can be arbitrarily long strings, such an enumeration can not be + completed. Furthermore, if the RUT state is encapsulated unseen in a black + box, it might be very difficult, or impossible, to detect when the state + has cycled.

+ +

Exhaustive testing is said to have been + done when every single input in the input space has been tested. + An exhaustive test will have obtained 100% coverage, with no rounding + done in the coverage computation.

+ +

Suppose that a fault appears at time t₀. Suppose there is a duration of + time of interest, Δ, that begins at or later than t₀. Suppose further + there exists a given test and test case that fails due to the fault, but + would not otherwise fail. Then a failure is + reproducible during Δ, if and only if the given test and test case + would fail if run at any time during Δ, and no matter how many times it is + run.

+ +

For a RUT that is a pure function, this definition is the same as saying the test case fails at the same input value every time during Δ, when ideally it should have passed. For a sequential RUT, it is saying that the same input vector sequence will always lead to a failure, when ideally it would lead to a pass.

+ +

Although the same test routine is run with identical inputs, a failure + might not be reproducible due to other sources of variability, as + examples:

+
    +
  1. The contract made with the programmer for using the exact same + inputs for the exact same test routine was broken. +
  2. Use of uninitialized memory. +
  3. Software updates or platform changes in between test runs during Δ. +
  4. Green thread, or real thread, scheduling differences, whether done by the OS or by the interpreter. +
  5. Using the system time as data, or other system parameter. +
  6. Race conditions. +
  7. Getting values from a randomly seeded pseudo random number generator.
  8. Reaching out of the architecture model for values, for example by using performance measures or by timing events.
  9. A hardware fault that is sensitive to a myriad of possible environmental influences.
+ +

Exhaustive testing will find all failures that are reproducible. It might + find failures that are not reproducible. The probability of witnessing + non-reproducible failures will typically go up when using the technique + of over testing, i.e. running even more than an + exhaustive number of tests.

+ +

Structured Testing

+ +

Structured testing is a form of white box testing, where the tester + examines the code being tested and applies various techniques to it + to increase the efficiency of the testing.

+ +

The Need for Structured Testing

+ +

All types of black-box testing have a serious problem in that the search space for failures grows exponentially with the number of input bits. You have probably heard about this sort of thing before, but you might not appreciate just how severe the situation is. To illustrate, we will consider the simplest of programs, one that adds two numbers. When the RUT is a black box, the test routine only has access to the interface, so it appears like this:

+ +

+        int8 sum(int8 a, int8 b){
+        ...
+        }
+    
+ +

Here, two int8 values are being added, so an input test vector will have + 16 bits. The result is also an int8, so an output vector will have 8 bits.

+ +

As the internals of the RUT are unknown, it could contain unexpected logic, like this:

+ +

+        int8 sum(int8 a, int8 b){
+        if(a == 248 && b == 224) return 5;
+        else return a + b;
+        }
+    
+ +

A developer might not be writing malicious code when something like this + appears; instead, the code might have been pulled from somewhere else and + dropped in. There could have been a special case in this situation on another + machine. Perhaps the code was generated by an AI, or it could be leftover + debug information. This example illustrates that testers are typically not + responsible for understanding developer code. Though in this case the logic + is obvious, there can be more obscure functions that testers cannot take the + time to understand, which might exhibit similar unexpected behavior.

+ +

As this is a black box, the numbers 248 and 224 are not known to the test writer. + Therefore, the only effective unstructured testing approach that is guaranteed to + find this failure is exhaustive testing.

+ +

Exhaustive testing is feasible here. An input test vector with 16 bits will lead to + an input space of 65,536 points. Sixty-five thousand tests is trivial for a modern + desktop. The full test will take about 100 microseconds, and in this time the test + routine is guaranteed to find all failures. Note that in 50 microseconds, half of + the input space will be covered, so there is a 0.5 probability of finding a single + failure within that time. Generally, half the total time corresponds to a 0.5 probability + of finding a single failure.
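A sketch of that exhaustive run in Java, with int values truncated to 8 bits since Java has no int8; the sum routine here is an invented stand-in for the black box, and the reference is ordinary addition with 8 bit wraparound:

    class ExhaustiveInt8Test{
      // Stand-in for the black box RUT; the (byte) cast models int8 wraparound.
      static int sum(int a, int b){ return (byte)(a + b); }

      public static void main(String[] args){
        int failures = 0;
        for(int a = -128; a <= 127; a++){
          for(int b = -128; b <= 127; b++){
            int expected = (byte)(a + b);   // reference: 8 bit wraparound addition
            if(sum(a, b) != expected) failures++;
          }
        }
        System.out.println("65,536 cases run, failures: " + failures);
      }
    }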

+ +

Now, suppose that instead of looking for a reproducible fault, we have:

+

+      int8 sum(int8 a, int8 b){
+        if(a == 255 * rand() && b == 224 * rand()) return 5;
+        else return a + b;
+      }
+    
+ +

In this case, to find the fault, the test routine must guess the values of two independent + 8-bit random variables from a uniform distribution. As they are independent, we can combine + them and note that the test must guess a 16-bit value. If we consider an "exhaustive" test, + the tester will make 2^16 tries. Hence, the probability of finding this failure is:

+ +

+        1 - (1 - 2^-16)^(2^16) = 0.6321...
+    
+ +

A small adjustment to the above equation is necessary to make it precise, because sometimes 5 is the correct answer. Thus, with 2^16 test cases, there will be certainty (a probability of 1.0) in finding all reproducible errors and about a 0.63 probability of finding a single random fault. The two probabilities are not as far apart as one might expect, given that the failure is "jumping around."
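The 0.63 figure can be checked with a few lines; a sketch:

    class MissProbability{
      public static void main(String[] args){
        double oneTryMiss = 1.0 - Math.pow(2, -16);               // a single guess misses
        double hit = 1.0 - Math.pow(oneTryMiss, Math.pow(2, 16)); // at least one of 2^16 guesses hits
        System.out.println(hit);                                  // prints roughly 0.6321, near 1 - 1/e
      }
    }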

+ +

Now, let's go back to the reproducible error case, but this time, suppose we are working + with an int16:

+ +

+      int16 sum(int16 a, int16 b){
+        ...
+      }
+    
+ +

Now an input vector has 32 bits, giving an input space of 4,294,967,296 points. At the rate established above, our computer will need about 7 seconds of compute time for this. Adding a few seconds of overhead, call it 10 seconds. Ten seconds to test a RUT as simple as this is already awkward, but perhaps we would invest in a faster computer?

+ +

+      int32 sum(int32 a, int32 b){
+        ...
+      }
+    
+ +

Now, suppose we are adding 32-bit numbers. The input space now has 18,446,744,073,709,551,616 points. Compute time, without overhead, will be roughly 900 years! Suffice it to say, we have discovered that testing the addition of two 32-bit numbers exhaustively is impractical. Even if we break the problem into 1,000 pieces on different processors and use a state-of-the-art server farm, it would still take months and cost a significant amount. What will you tell the boss?

+ +

But wait! What if we move to 64-bit computing?

+ +

+        int64 sum(int64 a, int64 b){
+        ...
+        }
+    
+ +

The input space now has:

+

+        340,282,366,920,938,463,463,374,607,431,768,211,456
+    
+

points. That's about 340 undecillion. Compute time is roughly 16 sextillion years, or about a trillion times the age of the universe. Even with all the processing power on Earth, and even if you are willing to accept a probability of 0.1 of finding the failure, it would still take on the order of the age of the universe to test a function as simple as adding two numbers. Clearly, there must be a better approach.

+ + +

Summary Table

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
Bits (operand width) | Input Space (points) | Compute Time
8 bits | 6.55 x 10^4 | 100 μs
16 bits | 4.29 x 10^9 | ~7 s
32 bits | 1.84 x 10^19 | ~900 years
64 bits | 3.40 x 10^38 | ~10^12 times the age of the universe
+ +

A typical response from people when they see this is that they knew it went up fast, but did not know it went up this fast. It is also important to note that there is a one to one relationship between the percentage of the exhaustive run time spent and the percentage of coverage obtained: half the time, 50 percent coverage. In the last row of the table, to have reasonable test times, coverage would have to be well below 10^-18 percent. At that level of coverage there is really no reason to test. Hence, this table is not limited to exhaustive testing; it speaks to black box testing in general.

+ +

Informed Spot Checking

+ +

In white box testing, we take the opposite approach to black box + testing. The test writer does look at the code implementation and + must understand how to read the code. Take our 64-bit adder example of + the prior section. Here in this section we will apply a white box + technique known as Informed Spot Checking.

+ +

This is the prior example as a black box:

+ +

+      int64 sum(int64 a, int64 b){
+        ...
+      }
+    
+ +

And here it is as a white box:

+ +

+      int64 sum(int64 a, int64 b){
+        if(a == 5717710 && b == 27) return 5;
+        else return a + b;
+      }
+    
+ +

When following the approach of Informed Spot Checking, the tester examines + the code and sees there is a special case for a = 5717710 + and b = 27, which becomes the first test case. There’s also + a special case for when the sum exceeds the 64-bit integer range, both in + the positive and negative directions; these become two more test + cases. Finally, the tester includes a few additional cases that are not + edge cases.
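A sketch of those informed spot checks in Java, with long standing in for int64; the constants come straight from reading the RUT above, and the pass/fail criteria for the overflow edges are left to the specification:

    class InformedSpotCheck{
      // The white box RUT from above, transliterated to Java long.
      static long sum(long a, long b){
        if(a == 5717710L && b == 27L) return 5;
        return a + b;
      }

      public static void main(String[] args){
        // 1. the special case read directly from the code
        System.out.println("special pair: " + (sum(5717710L, 27L) == 5717710L + 27L ? "pass" : "fail"));
        // 2. and 3. the overflow edges; whether wraparound counts as a failure depends on the specification
        System.out.println("upper edge:   " + sum(Long.MAX_VALUE, 1L));
        System.out.println("lower edge:   " + sum(Long.MIN_VALUE, -1L));
        // 4. an ordinary interior value
        System.out.println("interior:     " + (sum(2L, 3L) == 5L ? "pass" : "fail"));
      }
    }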

+ +

Thus, by using white box testing instead of black box testing, the tester finds all + the failures with just 4 or so test cases instead of

+

+      340,282,366,920,938,463,463,374,607,431,768,211,456 
+     
+

cases. Quite a savings, eh?

+ +

There are notorious edge cases in software, and these can often be seen by looking at the RUT. Zeros, and inputs that lead to index values just off the end of arrays, are common ones. Checking a middle value and the edge cases is often an effective approach for finding failures.

+ +

There is an underlying mechanism at play here. Note that it takes two points to determine a line. In Fourier analysis, it takes two samples per period of the highest frequency component to determine an entire waveform. Code also has patterns, patterns that are disjoint at edge cases. Hence, if a piece of code runs without failures for both the edge cases and spot check values in between, it will often run without failures over an entire domain of values. This effect explains why ad hoc testing has led to so much relatively failure-free code.

+ +

Informed Spot Checking is especially valuable in early development, as it + provides useful insights with minimal investment. In the early development + stage, making more investment in test code is unwise due to the code being + in flux. Test work is likely to get ripped up and replaced.

+ +

The idea of test work being ripped up and replaced highlights a drawback + of white box testing. Analysis of code can become stale when implementations + are changed. However, due to the explosion in the size of the input space + with even a modest number of inputs, white box testing is necessary if there + is to be much commitment to producing reliable software or hardware.

+ +

Refactoring the RUT

+ +

Refactoring a RUT to make it more testable can be a powerful method for + turning testing problems that are exponentially hard due to state + variables, or very difficult to debug due to random variables, into + problems that are linearly hard. According to this method, the + tester is encouraged to examine the RUT to make the testing problem + easier.

+ +

By refactoring the RUT I mean that we rework the code to bring any random variables or state variables to the interface, where they are then treated as inputs and outputs.

+ +

If placing state variables on the interface is adopted as a discipline by the developers, refactoring will not be needed in the test phase, or if it is needed, white box testers will see this, and it will be a bug that has been caught. Otherwise refactoring leads to two versions of a routine, one that has been refactored and one that has not. The leverage gained on the testing problem by refactoring a routine typically more than outweighs the extra verification problem of comparing the before and after routines.

+ +

As an example, consider our adder function with a random fault. As we + know from prior analysis, changing the fault to a random number makes + testing harder, but perhaps more importantly, it makes it nearly impossible + to debug, as the tester can not hand it to the developer and say, + 'it fails in this case'.

+

+      int64 sum(int64 a, int64 b){
+        if( a == (5717710 * rand()) && b == (27 * rand()) ) return 5;
+        else return a + b;
+      }
+    
+ +

The tester refactors this function as:

+

+      int64 sum( int64 a, int64 b, int64 a0 = 5717710*rand(), int64 b0 = 27*rand() ){
+        if( a == a0 && b == b0 ) return 5;
+        else return a + b;
+      }
+    
+ +

Here a0 and b0 are added to the interface as optional arguments. During testing their values are supplied; during production the defaults are used. Thus, we have broken the one test problem into two: whether sum works, and whether the random number generation works.
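Java has no default parameter values, so the same refactoring is usually expressed as an overload; a sketch with invented names:

    class RefactoredSum{
      static final java.util.Random rng = new java.util.Random();

      // Test entry point: the formerly hidden random values are now ordinary inputs.
      static long sum(long a, long b, long a0, long b0){
        if(a == a0 && b == b0) return 5;
        return a + b;
      }

      // Production entry point: the defaults reproduce the original behavior.
      // Bounding rand() to 8 bits here is an assumption made for the sketch.
      static long sum(long a, long b){
        return sum(a, b, 5717710L * rng.nextInt(256), 27L * rng.nextInt(256));
      }
    }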

+ +

Failures in sum found during testing are now reproducible. If the tester employs informed spot checking, the failure will be found with few tests, and the point in the input space where the failure occurs can be reported to development and used for debugging.

+ +

Here is a function that keeps a state variable between calls.

+

+    int state = 0;
+    int call_count = 0; 
+    void state_machine(int input) {
+        int choice = (input >> call_count) & 1; 
+        switch (state) {
+            case 0:
+                printf("State 0: Initializing...\n");
+                state = choice ? 0 : 1;
+                break;
+            case 1:
+                printf("State 1: Processing Path A...\n");
+                state = choice ? 0 : 2; 
+                break;
+            case 2:
+                printf("State 2: Processing Path B...\n");
+                state = choice ? 0 : 3;
+                break;
+        }
+        call_count++;
+    }
+    
+ +

The Mosaic Testbench makes standard out available to the test routine in an array, so we can capture and examine the printed values while testing this RUT. Because of the state variables, state and call_count, this routine will behave differently each time it is called. A black box test will have a large number of input vector sequences to try. The failure occurs in the call made after being in state 2, when the count is such that the choice is to go to state 3.

+ +

+    int state = 0;
+    int call_count = 0; 
+    void state_machine(int input ,int state0 = state ,int call_count0 = call_count) {
+        int choice = (input >> call_count0) & 1; 
+        switch (state0) {
+            case 0:
+                printf("State 0: Initializing...\n");
+                state = choice ? 0 : 1;
+                break;
+            case 1:
+                printf("State 1: Processing Path A...\n");
+                state = choice ? 0 : 2; 
+                break;
+            case 2:
+                printf("State 2: Processing Path B...\n");
+                state = choice ? 0 : 3;
+                break;
+        }
+        call_count = call_count0 + 1;
+    }
+    
+ +

Here the test routine supplies state0 and call_count0 as inputs. The test routine treats state and call_count as outputs, so this is then a pure function. As a pure function it is a much easier testing problem. Now instead of a combinatorially hard problem involving input sequences, the test routine can visit each of the three states, and set the input such that each visits its two next states. That is six test cases to see everything that this function is capable of doing.
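A sketch of those six cases in Java; the state machine is a transliteration of the refactored C routine above, kept as close to it as Java allows:

    class StateMachineTest{
      static int state = 0;
      static int callCount = 0;

      static void stateMachine(int input, int state0, int callCount0){
        int choice = (input >> callCount0) & 1;
        switch(state0){
          case 0: System.out.println("State 0: Initializing...");      state = (choice == 1) ? 0 : 1; break;
          case 1: System.out.println("State 1: Processing Path A..."); state = (choice == 1) ? 0 : 2; break;
          case 2: System.out.println("State 2: Processing Path B..."); state = (choice == 1) ? 0 : 3; break;
        }
        callCount = callCount0 + 1;
      }

      public static void main(String[] args){
        // expectedNext[state0][choice]: choice 0 advances, choice 1 returns to state 0
        int[][] expectedNext = { {1, 0}, {2, 0}, {3, 0} };
        for(int s0 = 0; s0 <= 2; s0++){
          for(int choice = 0; choice <= 1; choice++){
            stateMachine(choice, s0, 0);   // with callCount0 = 0, bit 0 of input is the choice
            boolean pass = (state == expectedNext[s0][choice]);
            System.out.println("state0=" + s0 + " choice=" + choice + ": " + (pass ? "pass" : "fail"));
          }
        }
      }
    }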

+ +

Any time the RUT is refactored in the testing phase, it raises the question of whether the refactored code maintains the required functionality. This becomes another verification problem, which might or might not be verified through testing. One way to manage this issue is to take the refactored code back to the developers and have them adopt it into the project. Then it becomes the original code.

+ +

Bottom Up Testing

+ +

When a function corresponds directly to CPU instructions, such as is the case for the + operator, we typically trust that it will give the right answer. The same can be said for the call and return dynamic. Unless we are working on a new compiler, it is typically assumed that this works. Application program test suites do not include tests to check that calls work.

+ +

The reason for this trust is that CPU instructions and function calls are already extensively tested, both directly by the manufacturers and through widespread use. This trust is not always warranted, though, as in the case of the Intel Pentium divider, which had failing cases.

+ +

We can decompose a testing problem into trusted and untrusted components. We call the trusted routines building blocks. We then use the building blocks to build up larger routines, and test those to create larger building blocks. At the end we will have built up a trustworthy program.

+ +

This approach parallels what developers do when they write programs. They + start with primitive programs that come with the language or from + libraries, and then they compose these to write custom functions.

+ +

The following is an expansion of our adder example for creating and testing an adder for 256 bit numbers. For purposes of presentation, we will refer to int256 as a type that corresponds to an array of 32 bytes, and uint1 as a 1 bit unsigned integer, i.e. 0 or 1.

+ +

+    {uint1, uint64} full_adder(uint64 a, uint64 b, uint1 c0) {
+        uint64 partial_sum = a + b;
+        uint64 sum = partial_sum + c0;
+        uint1 carry_out = (partial_sum < a) || (sum < partial_sum);
+        return {carry_out, sum};
+    }
+    
+ +

Here is a 256 bit adder made from 64 bit adders.

+ +

+    {uint1, int256} add_256(int256 a, int256 b) {
+        uint1 carry_in = 0;
+        int64 sum_parts[4];  // Array to store each 64-bit segment of the sum
+
+        for i = 0 to 3 {
+            // Get the i-th 64-bit segments of a and b
+            int64 a_part = (a >> (i * 64)) & 0xFFFFFFFFFFFFFFFF;
+            int64 b_part = (b >> (i * 64)) & 0xFFFFFFFFFFFFFFFF;
+
+            // Perform the full addition on each 64-bit part
+            {carry_out, sum_parts[i]} = full_adder(a_part, b_part, carry_in);
+
+            // Update carry-in for the next 64-bit segment
+            carry_in = carry_out;
+        }
+
+        int256 sum = 0;
+        for i = 0 to 3 {
+            sum |= (sum_parts[i] << (i * 64));
+        }
+
+        return {carry_in, sum};
+    }
+    
+ +

According to the bottom up technique, we first test + the full_adder, which is not a difficult testing problem. It + employs well known trusted operations, and has a couple of interesting + special case conditions. Given the numeric nature of this code, these + special case conditions are probably better verified by proof than by + testing, but they can be tested.
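A sketch of such a test in Java, where long models uint64 and Long.compareUnsigned supplies the unsigned comparisons; the adder is re-expressed here for the sketch rather than taken verbatim from the pseudocode:

    class FullAdderTest{
      // Returns {carry_out, sum}; long is treated as an unsigned 64 bit value.
      static long[] fullAdder(long a, long b, int c0){
        long partial = a + b;
        long sum = partial + c0;
        boolean carry = Long.compareUnsigned(partial, a) < 0
                     || Long.compareUnsigned(sum, partial) < 0;
        return new long[]{ carry ? 1 : 0, sum };
      }

      static void check(String name, long[] got, long carry, long sum){
        System.out.println(name + ": " + ((got[0] == carry && got[1] == sum) ? "pass" : "fail"));
      }

      public static void main(String[] args){
        check("no carry",            fullAdder(2L, 3L, 0),  0, 5L);
        check("carry from operands", fullAdder(-1L, 1L, 0), 1, 0L);   // all-ones + 1 wraps to 0
        check("carry from carry-in", fullAdder(-1L, 0L, 1), 1, 0L);   // all-ones + carry wraps to 0
        check("carry-in, no wrap",   fullAdder(2L, 3L, 1),  0, 6L);
      }
    }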

+ +

Once the full_adder can be trusted, testing add_256 + reduces to checking that the various 64 bit parts are extracted and then + packed correctly, + and are not, say, offset by one, and that the carries are properly communicated + during the add.

+ +

Note this test also trusts the fact that ripple carry addition is a valid + algorithm for assembling the pieces. Thus there is a new verification + problem, that for the algorithm. In this case, ripple carry addition is + already a trusted algorithm.

+ +

Testing of add_256 could be further simplified with refactoring, by moving the loop control variables, along with carry_in and carry_out, to the interface. As i is recycled, it would become two variables, say i and j. Once the loop control variables are on the interface it is straightforward to test the packing. Once the carries are on the interface it is straightforward to test the carries.

+ +

In general all programs and circuits can be conceptualized as functional + units, channels, and protocols. A test that shows that these work as specified, + shifts the test problem from the RUT to the specification.

+ +

Adding to the code

+ +

It is a common practice to add property checks to the code for gathering + data about failures or other potential problems. These will then write to + log files, or even send messages back to the code maintainers. By doing + this the testers benefit from the actual use of the product as though it + were a test run. When failures are found, such code might then trigger + remedial or recovery actions.
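A sketch of such an in-code check in Java; the property, the logging target, and the routine name are all invented for the example:

    class CheckedOps{
      // Production wrapper: compute, check an invariant, and log rather than crash.
      static long squareChecked(long x){
        long y = x * x;
        if(((x ^ y) & 1L) != 0){                  // squaring must preserve parity
          System.err.println("property violation: square(" + x + ") = " + y);
          // a real deployment might append to a log file or report back to the maintainers
        }
        return y;
      }
    }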

+ +

About Reference Outputs and Reference Properties

+ +

When testing during development, reference outputs often come from the + developers or testers themselves. They know what they expect from the + routines, but they do not know if the code will meet these expectations, + so they write tests. Typically, they try to imagine the hardest possible + cases. However, sometimes a young developer avoids testing challenging + cases to sidestep the risk of failures—this is, of course, a poor approach + that can lead to undetected issues.

+ +

Often, specification authors provide reference outputs or extensive test + suites that must be passed to achieve certification. Architects also + contribute by creating multi-level specifications—for the entire program, + for the largest components, and for communication protocols between + components. These specifications often serve as high-quality reference + outputs and property checks that can be applied to the model during testing. + The goal of developers and testers is to meet these specifications, making + failures directly relevant to the development process and program design.

+ +

Experts in a specific area sometimes provide test data, maintaining + a database of reference data as a resource for validating outputs. + For some types of code, experts also supply property checks, which + evaluate whether outputs satisfy essential properties rather than specific + values. Depending on the domain, these properties can be an important aspect + of the testing process.

+ +

Each time a bug is found, a test should be created to capture a failure + related to that bug. Ideally, such tests are written with minimal + implementation-specific details so they remain relevant even after code + changes. These tests are then added to a regression testing suite, ensuring + that future changes do not reintroduce the same issues.

+ +

For applications involving multi-precision arithmetic, such as the earlier adder example, reference data is often sourced from another established multi-precision library, whether an open-source or commercial product. The assumption is that an existing product will be more reliable than a newly developed one, and since it is implemented differently, its errors are likely to be uncorrelated. This is competitive testing, an aspect of compatibility testing, here being used for other objectives. In the limit, as the RUT matures, this approach will tend to identify bugs in the reference data from the other company as often as it does in the RUT, which might be an interesting effect.

+ +

In some cases, reference data comes from historical sources or existing + systems. When upgrading or replacing a legacy system, historical data + serves as a benchmark for comparison. Similarly, industry standards + and compliance datasets, particularly from regulatory organizations + like IEEE, NIST, or ISO, provide reliable reference points for applications + requiring standardized outputs. Compliance-driven tests are often required + for certification or regulatory approval in fields such as finance, + healthcare, and aerospace.

+ +

For cases requiring many inputs without needing specific reference values, random number generators can provide extensive test data. Examples include comparative testing and property checking. Random number generators can also be configured to concentrate cases in specific areas of the input domain that for some reason concern the testers.
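A sketch of such a generator in Java; the seed and the region of concern are arbitrary choices made for the example:

    class ConcentratedRandomInputs{
      public static void main(String[] args){
        java.util.Random rng = new java.util.Random(42);   // fixed seed so a failing case can be replayed
        for(int i = 0; i < 10; i++){
          // roughly half the cases land near the overflow edge, the region of concern here;
          // the rest are spread over the whole 64 bit domain
          long a = rng.nextBoolean() ? Long.MAX_VALUE - rng.nextInt(1000) : rng.nextLong();
          long b = rng.nextBoolean() ? Long.MAX_VALUE - rng.nextInt(1000) : rng.nextLong();
          System.out.println(a + " " + b);
        }
      }
    }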

+ +

Customer and user feedback sometimes uncovers additional test cases, + especially when dealing with complex or evolving software. Feedback + reveals edge cases or expected behaviors that developers and testers + may not have anticipated, allowing teams to create reference points + for new test cases that cover real-world use cases and address user needs.

+ +

Conclusion

+ +

If you are a typical tester or developer reading through the previous list, + you might feel a bit disappointed. Unless you work in a specialized area, + are attempting to create a compatible product, or need to exercise the hardware, much + of that list might seem inapplicable. For many developers, the most + applicable advice remains: "During development, reference outputs often + come from the developers or testers themselves." I apologize if this seems + limiting, but consider this: the reason we run programs is to generate the + very data we're looking for. If that data were easily available, we wouldn’t + need the program.

+ +

In many ways, testing is about making developers and testers the first + users of the product. All products will have bugs; it’s far better for + experts to encounter these issues first.

+ +

Testing also facilitates communication among project members. Are the + architects, developers, and testers all on the same page about how the + product should work? The only way to find out is to run what has been built + and observe it in action. For this, we need test cases.

+ +

This circular problem, finding data that our program should generate in order to test the program itself, illustrates a fundamental limitation in software testing. We encountered this in the discussion on unstructured, black-box testing: as soon as we open the box to inspect the code, we are no longer just testing it, but reasoning about it and even verifying it formally.

+ +

This, perhaps, hints at a way forward. Our program is a restatement of the + specification in another language. Verification, then, is an equivalence + check. We can run examples to demonstrate equivalence, but black-box testing + alone will have limited impact. Alternatively, we can examine our code and + try to prove that it matches the specification. Though challenging, this + approach is far more feasible than waiting ten times the age of the universe + to confirm our solution through black box testing.

+ +

Think of testing as a reasoning problem. Explain why the routine works and + how it contributes to meeting the specification. Work from the top down: if + the high-level components behave correctly, the program will meet the + specification. That’s the first step. Then explain why the breakdown of + those top-level components ensures correct behavior. Continue this process, + and then use tests to validate each link in this chain of reasoning. In this + way, you can generate meaningful reference values.

+ +
+
+
+
+
diff --git "a/document\360\237\226\211/license.txt" "b/document\360\237\226\211/license.txt"
new file mode 100644
index 0000000..e177f6f
--- /dev/null
+++ "b/document\360\237\226\211/license.txt"
@@ -0,0 +1,152 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that
+entity. For the purposes of this definition, "control" means (i) the power,
+direct or indirect, to cause the direction or management of such entity, whether
+by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of
+the outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of this
+License, each Contributor hereby grants to You a perpetual, worldwide,
+non-exclusive, no-charge, royalty-free, irrevocable copyright license to
+reproduce, prepare Derivative Works of, publicly display, publicly perform,
+sublicense, and distribute the Work and such Derivative Works in Source or
+Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of this License,
+each Contributor hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section) patent
+license to make, have made, use, offer to sell, sell, import, and otherwise
+transfer the Work, where such license applies only to those patent claims
+licensable by such Contributor that are necessarily infringed by their
+Contribution(s) alone or by combination of their Contribution(s) with the Work
+to which such Contribution(s) was submitted. If You institute patent litigation
+against any entity (including a cross-claim or counterclaim in a lawsuit)
+alleging that the Work or a Contribution incorporated within the Work
+constitutes direct or contributory patent infringement, then any patent licenses
+granted to You under this License for that Work shall terminate as of the date
+such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the Work or
+Derivative Works thereof in any medium, with or without modifications, and in
+Source or Object form, provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and You must cause any modified files to carry prominent notices
+stating that You changed the files; and You must retain, in the Source form of
+any Derivative Works that You distribute, all copyright, patent, trademark, and
+attribution notices from the Source form of the Work, excluding those notices
+that do not pertain to any part of the Derivative Works; and If the Work
+includes a "NOTICE" text file as part of its distribution, then any Derivative
+Works that You distribute must include a readable copy of the attribution
+notices contained within such NOTICE file, excluding those notices that do not
+pertain to any part of the Derivative Works, in at least one of the following
+places: within a NOTICE text file distributed as part of the Derivative Works;
+within the Source form or documentation, if provided along with the Derivative
+Works; or, within a display generated by the Derivative Works, if and wherever
+such third-party notices normally appear. The contents of the NOTICE file are
+for informational purposes only and do not modify the License. You may add Your
+own attribution notices within Derivative Works that You distribute, alongside
+or as an addendum to the NOTICE text from the Work, provided that such
+additional attribution notices cannot be construed as modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise, any
+Contribution intentionally submitted for inclusion in the Work by You to the
+Licensor shall be under the terms and conditions of this License, without any
+additional terms or conditions. Notwithstanding the above, nothing herein shall
+supersede or modify the terms of any separate license agreement you may have
+executed with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade names,
+trademarks, service marks, or product names of the Licensor, except as required
+for reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or agreed to in
+writing, Licensor provides the Work (and each Contributor provides its
+Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied, including, without limitation, any warranties
+or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+PARTICULAR PURPOSE. You are solely responsible for determining the
+appropriateness of using or redistributing the Work and assume any risks
+associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory, whether in
+tort (including negligence), contract, or otherwise, unless required by
+applicable law (such as deliberate and grossly negligent acts) or agreed to in
+writing, shall any Contributor be liable to You for damages, including any
+direct, indirect, special, incidental, or consequential damages of any character
+arising as a result of this License or out of the use or inability to use the
+Work (including but not limited to damages for loss of goodwill, work stoppage,
+computer failure or malfunction, or any and all other commercial damages or
+losses), even if such Contributor has been advised of the possibility of such
+damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing the Work or
+Derivative Works thereof, You may choose to offer, and charge a fee for,
+acceptance of support, warranty, indemnity, or other liability obligations
+and/or rights consistent with this License. However, in accepting such
+obligations, You may act only on Your own behalf and on Your sole
+responsibility, not on behalf of any other Contributor, and only if You agree to
+indemnify, defend, and hold each Contributor harmless for any liability incurred
+by, or claims asserted against, such Contributor by reason of your accepting any
+such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
diff --git "a/document\360\237\226\211/readme.txt" "b/document\360\237\226\211/readme.txt"
new file mode 100644
index 0000000..4a42a93
--- /dev/null
+++ "b/document\360\237\226\211/readme.txt"
@@ -0,0 +1,10 @@
+
+--------------------------------------------------------------------------------
+Mosaic
+
+A tool to assist in hierarchical white box testing.
+
+Each piece of a program must have integrity for the complete picture to emerge.
+
+With Mosaic we test the pieces, then the communication between the pieces.
+
diff --git "a/document\360\237\226\211/todo.txt" "b/document\360\237\226\211/todo.txt"
new file mode 100644
index 0000000..06f2ba3
--- /dev/null
+++ "b/document\360\237\226\211/todo.txt"
@@ -0,0 +1,27 @@
+
+1. More languages support, notably nodejs.
+
+2. This message:
+
+     Running Test_File_0...Structural problem: unpack_file_path_0 does not accept a single IO argument.
+     Error: unpack_file_path_0 has an invalid structure.
+
+   Perhaps keep only the second one, getting rid of 'Error:'
+
+   "Bad type signature for method: unpack_file_path_0 does not accept a single IO argument."
+
+3. TestBench -> Testbench perhaps?
+
+4. fix emacs.el so that jdbx doesn't always start with Test_Util. (note the
+   prior start can be found with M-x p
+
+
+5. should include a tool for aggregating test suite runs
+   FYI, I'm using
+
+6. need an example .bashrc for setting the prompt now that I removed references
+to the 'resource' project and plan to deprecate it.
+
+7. should check stdin for unused input and report error also.
+
+8. move `unlock_class` from Ariadne tests to Mosaic_Util.
diff --git a/tool/.githolder b/tool/.githolder
deleted file mode 100644
index e69de29..0000000
diff --git a/tool/env b/tool/env
deleted file mode 100644
index 73eaff8..0000000
--- a/tool/env
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-script_afp=$(realpath "${BASH_SOURCE[0]}")
-
-# input guards
-
-  env_must_be="tool_shared/bespoke/env"
-  error=false
-  if [ "$ENV" != "$env_must_be" ]; then
-    echo "$(script_fp):: error: must be run in the $env_must_be environment"
-    error=true
-  fi
-  if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
-    echo "$script_afp:: This script must be sourced, not executed."
-    error=true
-  fi
-  if $error; then exit 1; fi
-
-export PATH=\
-"$REPO_HOME"/tool_shared/bespoke/\
-:"$PATH"
-
-# expose sneaky hidden files
-alias ls="ls -a"
-
-# some feedback to show all went well
-
-  export PROMPT_DECOR="$PROJECT"_administrator
-  export ENV=$(script_fp)
-  echo ENV "$ENV"
-
-
-
-
diff --git a/tool_shared/document/#install_java.txt# b/tool_shared/document/#install_java.txt#
deleted file mode 100644
index 0091eac..0000000
--- a/tool_shared/document/#install_java.txt#
+++ /dev/null
@@ -1,11 +0,0 @@
-
-#1. downlaod
-
-cd "$REPO_HOME/tool/upstream"
-curl -C - -o OpenJDK11U-jdk_x64_linux_hotspot_11.0.16_8.tar.gz https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.16+8/OpenJDK11U-jdk_x64_linux_hotspot_11.0.16_8.tar.gz
-
-#2. extract
-
-cd "$REPO_HOME/tool"
-mkdir -p jdk-11
-tar -xzf "$REPO_HOME/tool/upstream/OpenJDK11U-jdk_x64_linux_hotspot_11.0.16_8.tar.gz" -C jdk-11 --strip-components 1
diff --git a/tool_shared/document/install_java.txt b/tool_shared/document/install_java.txt
deleted file mode 100644
index c9e5743..0000000
--- a/tool_shared/document/install_java.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-
-#1. downlaod
-
-cd "$REPO_HOME/tool/upstream"
-curl -C - -o OpenJDK11U-jdk_x64_linux_hotspot_11.0.16_8.tar.gz https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.16+8/OpenJDK11U-jdk_x64_linux_hotspot_11.0.16_8.tar.gz
-
-#2. extract
-
-cd "$REPO_HOME/tool"
-mkdir jdk-11
-tar -xzf "$REPO_HOME/tool/upstream/OpenJDK11U-jdk_x64_linux_hotspot_11.0.16_8.tar.gz" -C jdk-11 --strip-components 1
diff --git "a/tool_shared/document\360\237\226\211/#install_java.txt#" "b/tool_shared/document\360\237\226\211/#install_java.txt#"
new file mode 100644
index 0000000..0091eac
--- /dev/null
+++ "b/tool_shared/document\360\237\226\211/#install_java.txt#"
@@ -0,0 +1,11 @@
+
+#1. download
+
+cd "$REPO_HOME/tool/upstream"
+curl -C - -o OpenJDK11U-jdk_x64_linux_hotspot_11.0.16_8.tar.gz https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.16+8/OpenJDK11U-jdk_x64_linux_hotspot_11.0.16_8.tar.gz
+
+#2. extract
+
+cd "$REPO_HOME/tool"
+mkdir -p jdk-11
+tar -xzf "$REPO_HOME/tool/upstream/OpenJDK11U-jdk_x64_linux_hotspot_11.0.16_8.tar.gz" -C jdk-11 --strip-components 1
diff --git "a/tool_shared/document\360\237\226\211/install.txt" "b/tool_shared/document\360\237\226\211/install.txt"
new file mode 100644
index 0000000..543d228
--- /dev/null
+++ "b/tool_shared/document\360\237\226\211/install.txt"
@@ -0,0 +1,48 @@
+
+----------------------------------------
+env_administrator
+
+For mucking around with the tools install and config, cd to the top of
+the project and source the env_administrator environment.
+
+  git clone
+  cd project
+  source env_administrator
+
+----------------------------------------
+RT-incommon
+
+This pulls in documents and commonly used scripts. The project has symbolic links
+into RT-incommon, so this is not optional.
+
+  cd "$REPO_HOME/tool_shared/third_party/"
+  git clone https://github.com/Thomas-Walker-Lynch/resource.git
+  ln -s "$REPO_HOME/tool_shared/third_party/resource/document" see_also
+
+----------------------------------------
+jdk-23
+
+  cd "$REPO_HOME/tool_shared/third_party/upstream"
+
+  # source for the 11 version used before, now upgraded to 23
+  #curl -C - -o OpenJDK11U-jdk_x64_linux_hotspot_11.0.16_8.tar.gz https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.16+8/OpenJDK11U-jdk_x64_linux_hotspot_11.0.16_8.tar.gz
+  curl -L -C - -b "oraclelicense=accept-securebackup-cookie" -O https://download.oracle.com/java/23/latest/jdk-23_linux-x64_bin.tar.gz
+
+  cd ..
+  tar -xzf upstream/jdk-23_linux-x64_bin.tar.gz
+
+  edit $REPO_HOME/tool_shared/bespoke/env and update JAVA_HOME:
+  export JAVA_HOME="$REPO_HOME/tool_shared/third_party/jdk-23.0.1"
+
+----------------------------------------
+IDE
+
+This is not strictly necessary, but a local install of an IDE will ensure it
+stays in sync with the rest of the project build, including the configuration
+files and tools built into it.
+
+See the install_emacs.txt and/or install_IDEA.txt files.
+
+Note: I am mainly using emacs, but I also configured and ran IntelliJ IDEA to
+make sure it was working.
+
diff --git "a/tool_shared/document\360\237\226\211/install_emacs.txt" "b/tool_shared/document\360\237\226\211/install_emacs.txt"
new file mode 100644
index 0000000..63c8d6b
--- /dev/null
+++ "b/tool_shared/document\360\237\226\211/install_emacs.txt"
@@ -0,0 +1,19 @@
+
+# install and build script:
+
+cd "$REPO_HOME"/tool_shared/third_party
+mkdir -p emacs/{src,build,bin}
+
+pushd upstream
+curl -L -O https://ftp.gnu.org/gnu/emacs/emacs-29.4.tar.gz
+popd
+
+tar -xzf upstream/emacs-29.4.tar.gz -C emacs/src --strip-components=1
+
+pushd emacs/src
+./configure --prefix=emacs/build
+make -j$(nproc)
+make install DESTDIR=../bin
+popd
+
+rm -r emacs/build
diff --git "a/tool\360\237\226\211/.githolder" "b/tool\360\237\226\211/.githolder"
new file mode 100644
index 0000000..e69de29
diff --git "a/tool\360\237\226\211/env" "b/tool\360\237\226\211/env"
new file mode 100644
index 0000000..73eaff8
--- /dev/null
+++ "b/tool\360\237\226\211/env"
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+script_afp=$(realpath "${BASH_SOURCE[0]}")
+
+# input guards
+
+  env_must_be="tool_shared/bespoke/env"
+  error=false
+  if [ "$ENV" != "$env_must_be" ]; then
+    echo "$(script_fp):: error: must be run in the $env_must_be environment"
+    error=true
+  fi
+  if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
+    echo "$script_afp:: This script must be sourced, not executed."
+    error=true
+  fi
+  if $error; then exit 1; fi
+
+export PATH=\
+"$REPO_HOME"/tool_shared/bespoke/\
+:"$PATH"
+
+# expose sneaky hidden files
+alias ls="ls -a"
+
+# some feedback to show all went well
+
+  export PROMPT_DECOR="$PROJECT"_administrator
+  export ENV=$(script_fp)
+  echo ENV "$ENV"
+
+
+
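
A minimal sketch of how the administrator environment added above is meant to be
entered, following tool_shared/document🖉/install.txt. The clone URL is a
placeholder, and the chaining from env_administrator through
tool_shared/bespoke/env into tool🖉/env is assumed rather than shown in this
patch:

  # clone and enter the project; the URL is a placeholder
  git clone <repository-url> Mosaic
  cd Mosaic

  # must be sourced, not executed: the guard in tool🖉/env refuses direct execution
  source env_administrator

  # rough confirmation that the guards passed and the exports took effect
  echo "$ENV"            # path of the env script sourced last, per tool🖉/env
  echo "$PROMPT_DECOR"   # "<project>_administrator", e.g. Mosaic_administrator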