From: Thomas Walker Lynch Date: Sun, 12 Oct 2025 10:38:12 +0000 (+0000) Subject: removing pencils, was a nice idea .. X-Git-Url: https://git.reasoningtechnology.com/style/rt_dark_doc.css?a=commitdiff_plain;h=94f8b5b7cd5e13eb7147481eaf8500e634ba28b5;p=Mosaic removing pencils, was a nice idea .. --- diff --git a/developer/document/build_transcript_v1.0.txt b/developer/document/build_transcript_v1.0.txt new file mode 100644 index 0000000..29aa92c --- /dev/null +++ b/developer/document/build_transcript_v1.0.txt @@ -0,0 +1,58 @@ +> cd Mosaic +> source env_developer +> emacs & + +... + +2024-11-04T11:19:53Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> clean_build_directories ++ cd /var/user_data/Thomas-developer/Mosaic/developer ++ rm -r scratchpad/com ++ rm jvm/Mosaic.jar ++ rm shell/Mosaic ++ set +x +clean_build_directories done. + +2024-11-04T11:20:14Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> wipe_release ++ cd /var/user_data/Thomas-developer/Mosaic ++ rm -rf release/Mosaic release/Mosaic.jar ++ set +x +wipe_release done. + +2024-11-04T11:20:18Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> make +Compiling files... ++ javac -g -d scratchpad javac/IO.java javac/Mosaic.java javac/TestBench.java javac/Util.java ++ set +x +Creating JAR file... ++ jar_file=jvm/Mosaic.jar ++ mkdir -p jvm ++ jar cf jvm/Mosaic.jar -C scratchpad . ++ set +x +JAR file created successfully: jvm/Mosaic.jar +Creating shell wrappers... +developer/tool/make done. + +2024-11-04T11:20:40Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> release +Starting release process... +Installed Mosaic.jar to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r +Installed Mosaic to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r+x +developer/tool/release done. + +2024-11-04T11:20:44Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> clean_make_output ++ cd /var/user_data/Thomas-developer/Mosaic/developer ++ rm -r scratchpad/com/ReasoningTechnology/Mosaic ++ rm jvm/Mosaic.jar ++ rm 'shell/{Mosaic}' +rm: cannot remove 'shell/{Mosaic}': No such file or directory ++ set +x +clean_make_output done. + diff --git a/developer/document/build_transcript_v1.1.txt b/developer/document/build_transcript_v1.1.txt new file mode 100644 index 0000000..0a00aba --- /dev/null +++ b/developer/document/build_transcript_v1.1.txt @@ -0,0 +1,63 @@ + +--- setting up the environment: + + +024-11-08T07:40:57Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer§ +> bash + +2024-11-08T07:41:19Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer§ +> cd Mosaic + +2024-11-08T07:41:25Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ +> . env_developer +REPO_HOME /var/user_data/Thomas-developer/Mosaic +PROJECT Mosaic +ENV tool_shared/bespoke/env +ENV developer/tool/env + +2024-11-08T07:41:34Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> emacs & + + +--- building the release candidate + +2024-11-08T09:58:08Z[Mosaic_developer] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ +> clean_build_directories ++ cd /var/user_data/Thomas-developer/Mosaic/developer ++ rm -r scratchpad/com ++ rm jvm/Mosaic.jar ++ rm shell/Mosaic ++ set +x +clean_build_directories done. 
+
+2024-11-08T09:58:16Z[Mosaic_developer]
+Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§
+> make
+Compiling files...
++ javac -g -d scratchpad javac/Mosaic_IO.java javac/Mosaic_Mosaic.java javac/Mosaic_Testbench.java javac/Mosaic_Util.java
++ set +x
+Creating JAR file...
++ jar_file=jvm/Mosaic.jar
++ mkdir -p jvm
++ jar cf jvm/Mosaic.jar -C scratchpad .
++ set +x
+JAR file created successfully: jvm/Mosaic.jar
+Creating shell wrappers...
+developer/tool/make done.
+
+2024-11-08T09:58:21Z[Mosaic_developer]
+Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§
+> release
+Starting release process...
+Installed Mosaic.jar to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r
+Installed Mosaic to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r+x
+developer/tool/release done.
+
+2024-11-08T09:58:24Z[Mosaic_developer]
+Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§
+>
diff --git a/developer/document/the_build_environmet.txt b/developer/document/the_build_environmet.txt
new file mode 100644
index 0000000..cd491db
--- /dev/null
+++ b/developer/document/the_build_environmet.txt
@@ -0,0 +1,84 @@
+1. Tool
+
+The directory called `tool` holds tools for the developer. Each has comments
+at the top that say what it does.
+
+In the tool directory, `env` sets the PATH and CLASSPATH and prepares the
+developer's environment. Nothing will work right until this is sourced.
+(This is similar to Python's `venv`.)
+
+The tool called `make` builds the project. This is not the venerable
+`/bin/make` but a simple bash script. It compiles everything in the `javac`
+directory.
+
+The tool called `shall_wrapper_list` gives a list of class names that are to
+be given direct-call shell wrappers. `make` will put these in the `shell`
+directory.
+
+The `clean_` scripts are there to delete files so that developers do not have
+to type `rm` commands. This helps prevent accidents. Note that the
+`$REPO_HOME/tool_shared/bespoke/wipe_release` script will remove files from
+the `../release` directory.
+
+2. build
+
+`make` runs `javac`, which puts the class files into the `scratchpad`
+directory. It will `mkdir` a directory hierarchy in `scratchpad` that mirrors
+the package name.
+
+After compiling, `make` gathers the class files found in the `scratchpad`
+directory hierarchy and puts them into a `.jar` file. Said `.jar` file will
+be located in the directory `jvm`.
+
+The `scratchpad` directory is not pushed to the repo. It can be cleaned
+at any time, because it can always be rebuilt.
+
+3. release
+
+The `release` script will make a copy of the scripts in `shell` and the `.jar`
+file in `jvm` and put them in the `$REPO_HOME/release` directory. This
+comprises the release candidate. After a release branch is made, this becomes
+the actual release. Note the script `$REPO_HOME/bespoke/version`, which
+outputs the version for released code.
+
+
+4. debug
+
+If you use Emacs, note the file `$REPO_HOME/test_shared/bespoke/emacs.el`.
+
+Edit `make` to add or remove the `-g` flag from `javac`. This controls whether
+source information is embedded in the class files.
+
+After the code is compiled with the `-g` flag, the `jdb` debugger will look
+into the `scratchpad` directory hierarchy, where the sources were put, to
+find the source files to display when single stepping, etc.
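+
+For example, a minimal `jdb` session over this build might look like the
+following (the breakpoint location is illustrative only):
+
+  > jdb -classpath jvm/Mosaic.jar -sourcepath scratchpad com.ReasoningTechnology.Mosaic.Mosaic_Mosaic
+  > stop in com.ReasoningTechnology.Mosaic.Mosaic_Mosaic.run
+  > run
+  > step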
+
+The `distribute_source` tool adds links into the `scratchpad` directory
+hierarchy that point back into the `javac` directory. After these links are
+made, `jdb` will show the sources, and should the sources be edited, the
+originals located in the `javac` directory will be modified.
+
+5. debug from the `tester` environment
+
+The tester environment points at the release candidate located in the
+`$REPO_HOME/release` directory to find the Java classes.
+
+If this release candidate was compiled with the `-g` flag, then it will have
+embedded in it source information pointing back into the
+`$REPO_HOME/developer/scratchpad` directory.
+
+If `distribute_source` was not called by the developer, or the scratchpad
+contents have been cleaned, `jdb` will not be able to find the sources.
+If `jdb` does find the sources, and the tester edits them, then the originals
+in the `$REPO_HOME/developer/javac` directory will be modified. If this
+behavior is not desired, then put the tester on a `core_tester_branch`, and
+inspect changes before merging them back to the `core_developer_branch`.
+
+This setup makes it possible for developers to work from the tester
+environment without having to be on a separate branch, and for testers to
+work separately.
+
+
+
+
+
diff --git "a/developer/document\360\237\226\211/build_transcript_v1.0.txt" "b/developer/document\360\237\226\211/build_transcript_v1.0.txt"
deleted file mode 100644
index 29aa92c..0000000
--- "a/developer/document\360\237\226\211/build_transcript_v1.0.txt"
+++ /dev/null
@@ -1,58 +0,0 @@
-> cd Mosaic
-> source env_developer
-> emacs &
-
-...
-
-2024-11-04T11:19:53Z[Mosaic_developer]
-Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§
-> clean_build_directories
-+ cd /var/user_data/Thomas-developer/Mosaic/developer
-+ rm -r scratchpad/com
-+ rm jvm/Mosaic.jar
-+ rm shell/Mosaic
-+ set +x
-clean_build_directories done.
-
-2024-11-04T11:20:14Z[Mosaic_developer]
-Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§
-> wipe_release
-+ cd /var/user_data/Thomas-developer/Mosaic
-+ rm -rf release/Mosaic release/Mosaic.jar
-+ set +x
-wipe_release done.
-
-2024-11-04T11:20:18Z[Mosaic_developer]
-Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§
-> make
-Compiling files...
-+ javac -g -d scratchpad javac/IO.java javac/Mosaic.java javac/TestBench.java javac/Util.java
-+ set +x
-Creating JAR file...
-+ jar_file=jvm/Mosaic.jar
-+ mkdir -p jvm
-+ jar cf jvm/Mosaic.jar -C scratchpad .
-+ set +x
-JAR file created successfully: jvm/Mosaic.jar
-Creating shell wrappers...
-developer/tool/make done.
-
-2024-11-04T11:20:40Z[Mosaic_developer]
-Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§
-> release
-Starting release process...
-Installed Mosaic.jar to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r
-Installed Mosaic to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r+x
-developer/tool/release done.
-
-2024-11-04T11:20:44Z[Mosaic_developer]
-Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§
-> clean_make_output
-+ cd /var/user_data/Thomas-developer/Mosaic/developer
-+ rm -r scratchpad/com/ReasoningTechnology/Mosaic
-+ rm jvm/Mosaic.jar
-+ rm 'shell/{Mosaic}'
-rm: cannot remove 'shell/{Mosaic}': No such file or directory
-+ set +x
-clean_make_output done.
- diff --git "a/developer/document\360\237\226\211/build_transcript_v1.1.txt" "b/developer/document\360\237\226\211/build_transcript_v1.1.txt" deleted file mode 100644 index 0a00aba..0000000 --- "a/developer/document\360\237\226\211/build_transcript_v1.1.txt" +++ /dev/null @@ -1,63 +0,0 @@ - ---- setting up the environment: - - -024-11-08T07:40:57Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer§ -> bash - -2024-11-08T07:41:19Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer§ -> cd Mosaic - -2024-11-08T07:41:25Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ -> . env_developer -REPO_HOME /var/user_data/Thomas-developer/Mosaic -PROJECT Mosaic -ENV tool_shared/bespoke/env -ENV developer/tool/env - -2024-11-08T07:41:34Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> emacs & - - ---- building the release candidate - -2024-11-08T09:58:08Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> clean_build_directories -+ cd /var/user_data/Thomas-developer/Mosaic/developer -+ rm -r scratchpad/com -+ rm jvm/Mosaic.jar -+ rm shell/Mosaic -+ set +x -clean_build_directories done. - -2024-11-08T09:58:16Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> make -Compiling files... -+ javac -g -d scratchpad javac/Mosaic_IO.java javac/Mosaic_Mosaic.java javac/Mosaic_Testbench.java javac/Mosaic_Util.java -+ set +x -Creating JAR file... -+ jar_file=jvm/Mosaic.jar -+ mkdir -p jvm -+ jar cf jvm/Mosaic.jar -C scratchpad . -+ set +x -JAR file created successfully: jvm/Mosaic.jar -Creating shell wrappers... -developer/tool/make done. - -2024-11-08T09:58:21Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> release -Starting release process... -Installed Mosaic.jar to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r -Installed Mosaic to /var/user_data/Thomas-developer/Mosaic/release with permissions ug+r+x -developer/tool/release done. - -2024-11-08T09:58:24Z[Mosaic_developer] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/developer§ -> diff --git "a/developer/document\360\237\226\211/the_build_environmet.txt" "b/developer/document\360\237\226\211/the_build_environmet.txt" deleted file mode 100644 index cd491db..0000000 --- "a/developer/document\360\237\226\211/the_build_environmet.txt" +++ /dev/null @@ -1,84 +0,0 @@ -1. Tool - -The directory called `tool` has tools for the developer. There are comments at -the top of each that says what it does. - -In the tool directory, `env` sets the PATH, CLASSPATH, and prepares the developer's -environment. Noting will work right until this is sourced. (This is similar to -Python's `venv`.) - -The tool called `make` builds the project. This is not the venerable `/bin/make` -but is a simple bash script. It is going to compile everything in the `javac` -directory. - -The tool called `shall_wrapper_list` gives a list of classes names that are to -be given direct call shell wrappers. `make` will put these in the `shell` -directory. - -The `clean_` scripts are there to delete files so that developers do not have -to type `rm` commands. This helps prevent accidents. Note the -$REPO_HOME/tool_shared/bespoke/wipe_release script will remove files from the -../release directory. - -2. build - -`make` runs `javac` which puts the class files into the `scratch_pad` directory. 
-It will `makedir` a directory hierarchy in `scratch_pad` that mirrors the -package name. - -After compiling `make` then gathers the class files found in the scratchpad -directory hierarchy and puts them into a `.jar` file. Said `.jar` file will -be located in the directory `jvm`. - -The `scratch_pad` directory is not pushed to the repo. It can be cleaned -at any time, because it can always be rebuilt. - -3. release - -The `release` script will make a copy of the scripts in `shell` and the `.jar` -file in `jvm` and put them in the `$REPO_HOME/release` directory. This -comprises the release candidate. After a release branch is made, this becomes -the actual release. Note the script in `$REPO_HOME/bespoke/version` which -outputs the version for released code. - - -4. debug - -If you use emacs note the file `$REPO_HOME/test_shared/bespoke/emacs.el'. - -Edit `make` to add or remove the `-g` flag from `javac`. This controls putting -source code information into the class files. - -After `javac` is compiled with the `-g` flag, and in the `jdb` debugger, `jdb` -will look into the `scratchpad` directory hierarchy where the sources were -put to find the sources files to display when single stepping etc. - -The `distribute_source` tool adds links into the `scratchpad` directory hierarchy -the point back into the `javac` directory. After these links are made, `jdb` -will show the sources, and should the sources be edited, the originals located -in the `javac` directory will be modified. - -5. debug from the `tester` environment - -The tester environment points at the release candidate located in the -$REPO_HOME/release directory to find the java classes. - -If this release candidate was compiled with the `-g` flag, then it will have -embedded in it source information pointing back into the -`$REPO_HOME/developer/scratchpad` directory. - -If the `distribute_source` was not called by the developer, or the scratchpad -contents have been cleaned, jdb will not be able to find the sources. -If jdb does find the sources, and the tester edits them, then the originals -in the `$REPO_HOME/developer/javac` directory will be modified. If this -behavior is not desired, then put the tester on a `core_tester_branch`, then -inspect changes before merging them back to the `core_developer_branch`. - -This setup makes it possible for developers to use the tester environment -to work, without having to be on a separate branch, or for testers to -work separately. - - - - - diff --git a/developer/javac/Mosaic_Dispatcher.java b/developer/javac/Mosaic_Dispatcher.java new file mode 100644 index 0000000..b3b166e --- /dev/null +++ b/developer/javac/Mosaic_Dispatcher.java @@ -0,0 +1,693 @@ +package com.ReasoningTechnology.Mosaic; + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.reflect.Constructor; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + + +/*-------------------------------------------------------------------------------- + Is a signature for a Method + + The envisioned use case is the 'method signature' -> handle map. + + Perhaps the existing method signature in the Reflection library can + replace this, but most of the work done here is the formatting done + in the constructors. 
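+
+  A minimal usage sketch of the dispatcher built on these signatures
+  (`Widget` and its members are hypothetical, for illustration only):
+
+    Mosaic_Dispatcher d = new Mosaic_Dispatcher(Widget.class);
+    Widget w = d.make();  // constructor lookup under the name <init>
+    Integer n = d.dispatch(w ,int.class ,"grow" ,Mosaic_IsPrimitive.make(7));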
+*/ + +class MethodSignature{ + // header + private Class return_type; + private String class_name; + private String method_name; + + // variable length parameter type list + private Class[] parameter_type_list; + + // field access and strings + // + public String get_class_name(){ + return class_name; + } + + public String get_method_name(){ + return method_name; + } + + public Class get_return_type(){ + return return_type; + } + + public Class[] get_parameter_type_list(){ + return parameter_type_list; + } + + public String to_string_return_type(){ + return get_return_type() != null ? get_return_type().getSimpleName() : "null"; + } + + public String to_string_parameter_type_list(){ + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < get_parameter_type_list().length; i++){ + sb.append(get_parameter_type_list()[i] != null ? get_parameter_type_list()[i].getSimpleName() : "null"); + if (i < get_parameter_type_list().length - 1) sb.append(" ,"); + } + return sb.toString(); + } + + public String to_string_signature_header(){ + return to_string_return_type() + " " + get_class_name() + "." + get_method_name(); + } + + // constructors + // + private void init_header + ( + Class return_type + ,String class_name + ,String method_name + ){ + this.return_type = return_type; + this.class_name = class_name; + this.method_name = method_name; + } + + // Signature when given a Method. + // Used when putting methods into the method signature to handle map. + public MethodSignature(Method method){ + init_header + ( + method.getReturnType() + ,method.getDeclaringClass().getName() + ,method.getName() + ); + this.parameter_type_list = method.getParameterTypes(); + } + + // Signature when given a parameter type list. + // Used putting constructors into the signature to handle map. + public MethodSignature + ( + Class return_type + ,String class_name + ,String method_name + ,Class[] parameter_type_list + ){ + init_header(return_type ,class_name ,method_name); + this.parameter_type_list = parameter_type_list; + } + + // Signature when given an argument value list. + // Used by `invoke`. + public MethodSignature + ( + Class return_type + ,String class_name + ,String method_name + ,Object[] arg_list + ){ + init_header(return_type ,class_name ,method_name); + + // Set the signature parameter type to the argument type. + // No automatic conversions are applied. 
this.parameter_type_list = new Class[arg_list.length]; // Initialize the array
+    for(int i = 0; i < arg_list.length; i++){
+      if(arg_list[i] instanceof Mosaic_IsPrimitive){
+        parameter_type_list[i] =( (Mosaic_IsPrimitive) arg_list[i] ).get_type();
+      } else if(arg_list[i] != null){
+        parameter_type_list[i] = arg_list[i].getClass();
+      } else{
+        parameter_type_list[i] = null;
+      }
+    }
+  }
+
+  // standard interface
+  //
+  @Override
+  public String toString(){
+    return to_string_signature_header() + "(" + to_string_parameter_type_list() + ")";
+  }
+
+  @Override
+  public boolean equals(Object o){
+    if(this == o) return true;
+    if(o == null) return false;
+    if(o.getClass() != MethodSignature.class) return false;
+
+    MethodSignature signature = (MethodSignature) o;
+
+    return
+      get_class_name().equals(signature.get_class_name())
+      && get_method_name().equals(signature.get_method_name())
+      && get_return_type().equals(signature.get_return_type())
+      && Arrays.equals(get_parameter_type_list() ,signature.get_parameter_type_list());
+  }
+
+  @Override
+  public int hashCode(){
+    int result = get_class_name().hashCode();
+    result = 31 * result + get_method_name().hashCode();
+    result = 31 * result + get_return_type().hashCode();
+    result = 31 * result + Arrays.hashCode(get_parameter_type_list());
+    return result;
+  }
+
+}
+
+/*--------------------------------------------------------------------------------
+This is a method signature to callable method handle dictionary.
+
+In the envisioned use case there is one such dictionary per
+Dispatcher instance.
+
+ */
+class MethodSignature_To_Handle_Map{
+
+  // Static test messaging
+  //
+  private static boolean test = false;
+  public static void test_switch(boolean test){
+    if (MethodSignature_To_Handle_Map.test && !test){
+      test_print("MethodSignature_To_Handle_Map:: test messages off");
+    }
+    if (!MethodSignature_To_Handle_Map.test && test){
+      test_print("MethodSignature_To_Handle_Map:: test messages on");
+    }
+    MethodSignature_To_Handle_Map.test = test;
+  }
+  private static void test_print(String message){
+    if(test){
+      System.out.println(message);
+    }
+  }
+
+  // instance data
+  //
+  private Map<MethodSignature ,MethodHandle> map;
+
+  // field access and strings
+  //
+
+  // constructors
+  //
+  public MethodSignature_To_Handle_Map(){
+    map = new HashMap<>();
+  }
+
+  // methods for adding entries
+  //
+  public void add_class(Class<?> class_metadata){
+    test_print("MethodSignature_To_Handle_Map::add_class adding methods");
+    add_methods(class_metadata);
+
+    test_print("MethodSignature_To_Handle_Map::add_class adding constructors");
+    add_constructors(class_metadata);
+
+    test_print("MethodSignature_To_Handle_Map::add_class adding fields");
+    // add_fields(class_metadata);
+  }
+
+  private void add_entry(MethodSignature key ,MethodHandle value){
+    test_print
+      (
+       "(add_entry:: " + "(key " + key + ") " + "(value " + value + ")" + ")"
+      );
+    map.put(key ,value);
+  }
+
+  public void add_methods(Class<?> class_metadata){
+    try{
+      MethodHandles.Lookup lookup = MethodHandles.lookup();
+      MethodHandles.Lookup private_lookup = MethodHandles.privateLookupIn(class_metadata,lookup);
+
+      for(Method method:class_metadata.getDeclaredMethods()){
+        try{
+          Class<?>[] parameter_type_list=method.getParameterTypes();
+          MethodSignature signature=new MethodSignature(
+            method.getReturnType(),
+            class_metadata.getName(),
+            method.getName(),
+            parameter_type_list
+          );
+
+          MethodType method_type=MethodType.methodType(method.getReturnType(),parameter_type_list);
+          MethodHandle method_handle;
+
+          if((method.getModifiers() & Modifier.STATIC) != 0){
+            method_handle = private_lookup.findStatic(class_metadata ,method.getName() ,method_type);
+          }else{
+            method_handle = private_lookup.findSpecial(class_metadata ,method.getName() ,method_type ,class_metadata);
+          }
+
+          add_entry(signature,method_handle);
+
+        }catch(IllegalAccessException|NoSuchMethodException e){
+          System.err.println
+            (
+             "Mosaic_Dispatcher::add_methods unexpectedly failed to register method: "
+             + method.getName() + " in class: " + class_metadata.getName()
+            );
+          e.printStackTrace();
+        }
+      }
+
+    }catch(IllegalAccessException e){
+      System.err.println("Mosaic_Dispatcher::add_methods unexpectedly failed to initialize lookup for class: "+class_metadata.getName());
+      e.printStackTrace();
+    }
+  }
+
+  public void add_constructors(Class<?> class_metadata){
+    try{
+
+      MethodHandles.Lookup lookup = MethodHandles.lookup();
+      MethodHandles.Lookup private_lookup = MethodHandles.privateLookupIn(class_metadata ,lookup);
+
+      for( Constructor<?> constructor : class_metadata.getDeclaredConstructors() ){
+        try{
+
+          Class<?>[] parameter_type_list = constructor.getParameterTypes();
+          MethodType method_type = MethodType.methodType(void.class ,parameter_type_list);
+          MethodHandle constructor_handle = private_lookup.findConstructor(class_metadata ,method_type);
+
+          // Signature for constructors: <init> with parameter types
+          MethodSignature signature = new MethodSignature
+            (
+             void.class
+             ,class_metadata.getName()
+             ,"<init>"
+             ,parameter_type_list
+            );
+          add_entry(signature ,constructor_handle);
+
+        }catch(IllegalAccessException|NoSuchMethodException e){
+          System.err.println("Mosaic_Dispatcher::add_constructors unexpectedly failed to register constructor: " + class_metadata.getName());
+          e.printStackTrace();
+        }
+      }
+
+    }catch(IllegalAccessException e){
+      System.err.println("Mosaic_Dispatcher::add_constructors unexpectedly failed to initialize lookup for class: " + class_metadata.getName());
+      e.printStackTrace();
+    }
+  }
+
+  public void add_fields(Class<?> class_metadata){
+    try{
+      MethodHandles.Lookup lookup = MethodHandles.lookup();
+      MethodHandles.Lookup private_lookup = MethodHandles.privateLookupIn(class_metadata ,lookup);
+
+      for(Field field : class_metadata.getDeclaredFields()){
+        try{
+          // Field Metadata
+          String field_name = field.getName();
+          Class<?> field_type = field.getType();
+
+          // Create MethodHandle for the field getter
+          MethodHandle read_handle = private_lookup.unreflectGetter(field);
+          MethodSignature read_signature = new MethodSignature
+            (
+             field_type
+             ,class_metadata.getName()
+             ,""
+             ,new Class[]{}
+            );
+          add_entry(read_signature ,read_handle);
+
+          // Create MethodHandle for the field setter
+          MethodHandle write_handle = private_lookup.unreflectSetter(field);
+          MethodSignature write_signature = new MethodSignature
+            (
+             void.class
+             ,class_metadata.getName()
+             ,""
+             ,new Class[]{field_type}
+            );
+          add_entry(write_signature ,write_handle);
+
+        }catch(IllegalAccessException e){
+          System.err.println("Mosaic_Dispatcher::add_fields unexpectedly failed to register field: " + field.getName());
+          e.printStackTrace();
+        }
+      }
+    }catch(IllegalAccessException e){
+      System.err.println("Mosaic_Dispatcher::add_fields unexpectedly failed to initialize lookup for class: " + class_metadata.getName());
+      e.printStackTrace();
+    }
+  }
+
+
+  // methods for looking up handles
+  //
+  public MethodHandle lookup(MethodSignature s){
+    return map.get(s);
+  }
+
+  // standard interface
+  //
+  @Override
+  public String toString(){
+    StringBuilder sb = new StringBuilder();
+    sb.append("MethodSignature_To_Handle_Map:{").append(System.lineSeparator());
+
+    for(Map.Entry<MethodSignature ,MethodHandle> entry : map.entrySet()){
+      sb.append("  ")
+        .append(entry.getKey().toString())  // MethodSignature's toString
+        .append(" -> ")
+        .append(entry.getValue().toString())  // MethodHandle's toString
+        .append(System.lineSeparator());
+    }
+
+    sb.append("}");
+    return sb.toString();
+  }
+
+}
+
+/*--------------------------------------------------------------------------------
+  Given a class, dispatches calls to methods.
+
+*/
+public class Mosaic_Dispatcher{
+
+  // Static test messaging
+  //
+  private static boolean test = false;
+  public static void test_switch(boolean test){
+    if(Mosaic_Dispatcher.test && !test){
+      test_print("Mosaic_Dispatcher:: test messages off");
+    }
+    if(!Mosaic_Dispatcher.test && test){
+      test_print("Mosaic_Dispatcher:: test messages on");
+      MethodSignature_To_Handle_Map.test_switch(true);
+    }
+    Mosaic_Dispatcher.test = test;
+  }
+  public static void test_print(String message){
+    if(test){
+      System.out.println(message);
+    }
+  }
+
+  // instance data
+  //
+  private MethodSignature_To_Handle_Map map;
+  private Class<?> target;
+
+
+  // field access and strings
+  //
+  public Class<?> get_target(){
+    return target;
+  }
+
+  public MethodSignature_To_Handle_Map get_map(){
+    return map;
+  }
+
+  public String to_string_target(){
+    return target != null ? target.getName() : "null";
+  }
+
+  // constructors
+  //
+
+  // construct given the class metadata for the target class
+  public Mosaic_Dispatcher(Class<?> target){
+    this.map = new MethodSignature_To_Handle_Map();
+    this.target = target;
+    test_print("Mosaic_Dispatcher:: mapping methods given class_metadata object: " + to_string_target());
+    this.map.add_class(target);
+  }
+
+  // Constructor accepting a fully qualified class name of the target class
+  public Mosaic_Dispatcher(String fully_qualified_class_name) throws ClassNotFoundException{
+    this.map = new MethodSignature_To_Handle_Map();
+    this.target = Class.forName(fully_qualified_class_name);
+    test_print("Mosaic_Dispatcher:: mapping methods from class specified by string: \"" + to_string_target() + "\"");
+    this.map.add_class(target);
+  }
+
+  // methods unique to the class
+  //
+  @SuppressWarnings("unchecked")
+  public <T> T read(String field_name){
+    try{
+      test_print("Call to Mosaic_Dispatcher::read( field_name )");
+
+      MethodHandles.Lookup lookup = MethodHandles.privateLookupIn(target ,MethodHandles.lookup());
+      Field field = target.getDeclaredField(field_name);
+      MethodHandle handle = lookup.unreflectGetter(field);
+      return (T) handle.invoke();
+
+    }catch(NoSuchFieldException | IllegalAccessException e){
+      System.out.println("Mosaic_Dispatcher::read of static exception:");
+      e.printStackTrace();
+      return null;
+    }catch(Throwable t){
+      System.out.println("Mosaic_Dispatcher::read of static exception:");
+      t.printStackTrace();
+      return null;
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  public <T> T read(Object instance ,String field_name){
+    try{
+      test_print("Call to Mosaic_Dispatcher::read(instance ,field_name)");
+
+      MethodHandles.Lookup lookup = MethodHandles.privateLookupIn(target ,MethodHandles.lookup());
+      Field field = target.getDeclaredField(field_name);
+
+      if(instance == null || !target.isInstance(instance)){
+        throw new IllegalArgumentException
+          (
+           "Mosaic_Dispatcher::read provided instance is not of target type: "
+           + target.getName()
+           + ", but received: "
+           + (instance == null ? "null" : instance.getClass().getName())
+          );
+      }
+      MethodHandle handle = lookup.unreflectGetter(field);
+      return (T) handle.bindTo(instance).invoke();
+
+    }catch(NoSuchFieldException | IllegalAccessException e){
+      System.out.println("Mosaic_Dispatcher::read exception:");
+      e.printStackTrace();
+      return null;
+    }catch(Throwable t){
+      System.out.println("Mosaic_Dispatcher::read exception:");
+      t.printStackTrace();
+      return null;
+    }
+  }
+
+  public <T> void write(String field_name ,T value){
+    try{
+      test_print("Call to Mosaic_Dispatcher::write(field_name ,value)");
+
+      MethodHandles.Lookup lookup = MethodHandles.privateLookupIn(target ,MethodHandles.lookup());
+      Field field = target.getDeclaredField(field_name);
+      MethodHandle handle = lookup.unreflectSetter(field);
+      handle.invoke(value);
+
+    }catch(NoSuchFieldException | IllegalAccessException e){
+      System.out.println("Mosaic_Dispatcher::write static field exception:");
+      e.printStackTrace();
+    }catch(Throwable t){
+      System.out.println("Mosaic_Dispatcher::write static field exception:");
+      t.printStackTrace();
+    }
+  }
+
+  public <T> void write(Object instance ,String field_name ,T value){
+    try{
+      test_print("Call to Mosaic_Dispatcher::write(instance ,field_name ,value)");
+
+      MethodHandles.Lookup lookup = MethodHandles.privateLookupIn(target ,MethodHandles.lookup());
+      Field field = target.getDeclaredField(field_name);
+
+      if(instance == null || !target.isInstance(instance)){
+        throw new IllegalArgumentException
+          (
+           "Mosaic_Dispatcher::write provided instance is not of target type: "
+           + target.getName()
+           + ", but received: "
+           + (instance == null ? "null" : instance.getClass().getName())
+          );
+      }
+      MethodHandle handle = lookup.unreflectSetter(field);
+      handle.bindTo(instance).invoke(value);
+
+    }catch(NoSuchFieldException | IllegalAccessException e){
+      System.out.println("Mosaic_Dispatcher::write instance field exception:");
+      e.printStackTrace();
+    }catch(Throwable t){
+      System.out.println("Mosaic_Dispatcher::write instance field exception:");
+      t.printStackTrace();
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  public <T> T make(Object... arg_list){
+    test_print("Call to Mosaic_Dispatcher::make");
+
+    // Use dispatch_1 to invoke the constructor
+    Object result = dispatch_1(
+      null  // no instance for constructor
+      ,void.class  // return type for signature matching
+      ,"<init>"  // constructors are always named `<init>` in Java
+      ,arg_list
+    );
+
+    // Cast the result to the target type
+    return (T) target.cast(result);
+  }
+
+  // dispatch static methods
+  public <T> T dispatch
+    (
+     Class<T> return_type
+     ,String method_name
+     ,Object... arg_list
+    ){
+    test_print("Call to Mosaic_Dispatcher::dispatch for a static method.");
+    return dispatch_1
+      (
+       null  // No instance for static methods
+       ,return_type  // Return type
+       ,method_name  // Method name
+       ,arg_list  // Argument list
+      );
+  }
+
+  // dispatch instance-bound methods
+  public <T> T dispatch
+    (
+     Object instance,
+     Class<T> return_type,
+     String method_name,
+     Object... arg_list
+    ){
+    test_print("Call to Mosaic_Dispatcher::dispatch for a method bound to an instance.");
+    if(instance == null || !target.isInstance(instance)){
+      throw new IllegalArgumentException
+        (
+         "Provided instance is not of target type: "
+         + target.getName()
+         + ", but received: "
+         + (instance == null ? "null" : instance.getClass().getName())
+        );
+    }
+    return dispatch_1(instance ,return_type ,method_name ,arg_list);
+  }
+
+  @SuppressWarnings("unchecked")
+  private <T> T dispatch_1(
+    Object instance,
+    Class<T> return_type,
+    String method_name,
+    Object... arg_list
+  ){
+    try{
+      if(arg_list == null){
+        arg_list = new Object[0]; // Treat null as an empty argument list
+      }
+
+      // Resolve method/constructor signature
+      MethodSignature signature = new MethodSignature(
+        return_type,
+        to_string_target(),
+        method_name,
+        arg_list
+      );
+      test_print("dispatch_1:: signature key:" + signature.toString());
+
+      MethodHandle handle = map.lookup(signature);
+
+      if(handle == null){
+        throw new NoSuchMethodException("No method or constructor found for signature: " + signature.toString());
+      }
+
+      // Strip off any IsPrimitive tags
+      Object[] untagged_arg_list = new Object[arg_list.length];
+      for(int i = 0; i < arg_list.length; i++){
+        if(arg_list[i] instanceof Mosaic_IsPrimitive){
+          untagged_arg_list[i] = ((Mosaic_IsPrimitive) arg_list[i]).get_value();
+        }else{
+          untagged_arg_list[i] = arg_list[i];
+        }
+      }
+
+      // call the Handle and cast the result
+      //
+      if("<init>".equals(method_name)){
+        // Constructor invocation
+        return (T) target.cast(handle.invokeWithArguments(untagged_arg_list));
+      }
+
+      if(return_type == void.class || return_type == null){
+        if(instance == null){
+          // static method call
+          handle.invokeWithArguments(untagged_arg_list);
+        }else{
+          // method bound to instance call
+          handle.bindTo(instance).invokeWithArguments(untagged_arg_list);
+        }
+        return null; // generic code void return type must return null
+      }
+
+      Object result;
+      if(instance == null){
+        // static method call
+        result = handle.invokeWithArguments(untagged_arg_list);
+      }else{
+        // method bound to instance call
+        result = handle.bindTo(instance).invokeWithArguments(untagged_arg_list); // Instance method
+      }
+
+      if(result == null) return null;
+
+      // Handle primitive return types explicitly
+      if(return_type.isPrimitive()){
+        if(return_type == boolean.class) return(T)(Boolean) result;
+        if(return_type == int.class) return(T)(Integer) result;
+        if(return_type == double.class) return(T)(Double) result;
+        if(return_type == float.class) return(T)(Float) result;
+        if(return_type == long.class) return(T)(Long) result;
+        if(return_type == short.class) return(T)(Short) result;
+        if(return_type == byte.class) return(T)(Byte) result;
+        if(return_type == char.class) return(T)(Character) result;
+      }
+
+      // For non-primitives, cast normally
+      return return_type.cast(result);
+
+    }catch(Throwable t){
+      System.out.println("Mosaic_Dispatcher::dispatch exception:");
+      t.printStackTrace();
+      return null;
+    }
+  }
+
+  // standard interface
+  //
+  @Override
+  public String toString(){
+    return
+      "Mosaic_Dispatcher{"
+      + "target="
+      + to_string_target()
+      + " ,map="
+      + map.toString()
+      + "}"
+      ;
+  }
+
+}
diff --git a/developer/javac/Mosaic_IO.java b/developer/javac/Mosaic_IO.java
new file mode 100644
index 0000000..fe6bdff
--- /dev/null
+++ b/developer/javac/Mosaic_IO.java
@@ -0,0 +1,144 @@
+package com.ReasoningTechnology.Mosaic;
+/*
+  The primary purpose of this class is to redirect I/O to buffers,
+  so that a test can check the I/O behavior of a function under test.
+*/ + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.FileOutputStream; +import java.io.FileInputStream; +import java.io.FileDescriptor; +import java.io.PrintStream; +import java.io.InputStream; + +public class Mosaic_IO{ + + private PrintStream original_out; + private PrintStream original_err; + private InputStream original_in; + + private ByteArrayOutputStream out_content; + private ByteArrayOutputStream err_content; + private ByteArrayInputStream in_content; + private Boolean streams_foobar = false; + private Boolean uninitialized = true; + + + // IO currently has no constructors defined, uses default + + + // Redirects IO streams, logs and handles errors if redirection fails. + // + // Most tests do not do I/O checks, so rather than throwing an error + // it will set the streams_foobar flag, then throw an error if the I/O + // functions are used. + // + // This is the only method that can set the streams_foobar flag. + public Boolean redirect(){ + + try{ + original_out = System.out; + original_err = System.err; + original_in = System.in; + + out_content = new ByteArrayOutputStream(); + err_content = new ByteArrayOutputStream(); + in_content = new ByteArrayInputStream(new byte[0]); + + System.setOut( new PrintStream(out_content) ); + System.setErr( new PrintStream(err_content) ); + System.setIn(in_content); + + uninitialized = false; + return true; + + } catch(Exception e){ + restore_hard(); + streams_foobar = true; + return false; + + } + } + + // Hard restore of the streams, resetting to system defaults + public void restore_hard(){ + System.setOut(new PrintStream( new FileOutputStream(FileDescriptor.out)) ); + System.setErr(new PrintStream( new FileOutputStream(FileDescriptor.err))) ; + System.setIn(new FileInputStream(FileDescriptor.in)); + } + + // Restores original IO streams, ensuring foobar and uninitialized states are checked. + // If anything goes wrong reverse to restore_hard. + public void restore(){ + if(uninitialized || streams_foobar){ + restore_hard(); + return; + } + try{ + System.setOut(original_out); + System.setErr(original_err); + System.setIn(original_in); + } catch(Throwable e){ + restore_hard(); + } + } + + // Clears output, error, and input buffers, checks for foobar state only. + public void clear_buffers(){ + if(streams_foobar){ + throw new IllegalStateException("Cannot clear buffers: IO object is in foobar state."); + } + out_content.reset(); + err_content.reset(); + in_content = new ByteArrayInputStream( new byte[0] ); // Reset to EOF + System.setIn(in_content); + } + + public Boolean has_out_content(){ + if(streams_foobar){ + throw new IllegalStateException + ( + "Cannot access stdout content: IO object is in foobar state." + ); + } + return out_content.size() > 0; + } + public String get_out_content(){ + if(streams_foobar){ + throw new IllegalStateException + ( + "Cannot access stdout content: IO object is in foobar state." + ); + } + return out_content.toString(); + } + + public Boolean has_err_content(){ + if(streams_foobar){ + throw new IllegalStateException + ( + "Cannot access stderr content: IO object is in foobar state." + ); + } + return err_content.size() > 0; + } + public String get_err_content(){ + if(streams_foobar){ + throw new IllegalStateException + ( + "Cannot access stderr content: IO object is in foobar state." + ); + } + return err_content.toString(); + } + + // Pushes input string onto stdin, checks foobar state only. 
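+  // For example, push_input("yes\n") makes the next read from System.in
+  // see "yes\n".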
+ public void push_input(String input_data){ + if(streams_foobar){ + throw new IllegalStateException("Cannot push input: IO object is in foobar state."); + } + in_content = new ByteArrayInputStream( input_data.getBytes() ); + System.setIn(in_content); + } +} diff --git a/developer/javac/Mosaic_IsPrimitive.java b/developer/javac/Mosaic_IsPrimitive.java new file mode 100644 index 0000000..e0d6652 --- /dev/null +++ b/developer/javac/Mosaic_IsPrimitive.java @@ -0,0 +1,32 @@ +package com.ReasoningTechnology.Mosaic; + +public class Mosaic_IsPrimitive { + private final Object value; + + public Mosaic_IsPrimitive(Object value){ + this.value = value; + } + + public static Mosaic_IsPrimitive make(Object value) { + return new Mosaic_IsPrimitive(value); + } + + public Object get_value(){ + return value; + } + + public Class get_type(){ + if( value == null ) return null; + if( value instanceof Integer ) return int.class; + if( value instanceof Boolean ) return boolean.class; + if( value instanceof Double ) return double.class; + if( value instanceof Float ) return float.class; + if( value instanceof Long ) return long.class; + if( value instanceof Short ) return short.class; + if( value instanceof Byte ) return byte.class; + if( value instanceof Character ) return char.class; + return value.getClass(); + } + +} + diff --git a/developer/javac/Mosaic_Logger.java b/developer/javac/Mosaic_Logger.java new file mode 100644 index 0000000..1abb5c1 --- /dev/null +++ b/developer/javac/Mosaic_Logger.java @@ -0,0 +1,52 @@ +package com.ReasoningTechnology.Mosaic; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class Mosaic_Logger{ + + private static final Logger LOGGER = LoggerFactory.getLogger(Mosaic_Logger.class); + + // Formats and logs an output related to a specific test + public static void output(String test_name, String stream, String output_data){ + String timestamp = Mosaic_Time.iso_UTC_time(); + String formatted_log = String.format( + "\n%s -----------------------------------------------------------\n" + + "Test: %s\n" + + "Stream: %s\n" + + "Output:\n%s\n", + timestamp, test_name, stream, output_data + ); + + LOGGER.info(formatted_log); + } + + // Logs a general message for a test + public static void message(String test_name, String message){ + String timestamp = Mosaic_Time.iso_UTC_time(); + String formatted_log = String.format( + "\n%s -----------------------------------------------------------\n" + + "Test: %s\n" + + "Message:\n%s\n", + timestamp, test_name, message + ); + + LOGGER.info(formatted_log); + } + + public static void error(String test_name, String message, Throwable error){ + String timestamp = Mosaic_Time.iso_UTC_time(); + String formatted_log = String.format( + "\n%s -----------------------------------------------------------\n" + + "Test: %s\n" + + "Message:\n%s\n" + + "Error:\n", + timestamp, test_name, message + ); + + // Pass the Throwable 'error' as the last argument to LOGGER.error. + // This automatically logs the stack trace at the ERROR level. + LOGGER.error(formatted_log, error); + } + +} diff --git a/developer/javac/Mosaic_Mosaic.java b/developer/javac/Mosaic_Mosaic.java new file mode 100644 index 0000000..51e57d0 --- /dev/null +++ b/developer/javac/Mosaic_Mosaic.java @@ -0,0 +1,27 @@ +package com.ReasoningTechnology.Mosaic; + +/* +The Mosaic shell callable wrapper is currently a placeholder. Perhaps someday we +can find something for this to do. 
+
+*/
+
+
+public class Mosaic_Mosaic{
+
+  public static Boolean test_is_true(){
+    return true;
+  }
+
+  public static int run(){
+    System.out.println("Main function placeholder. Currently Mosaic is used by extending the TestBench class.");
+    return 0;
+  }
+
+  public static void main(String[] args){
+    int return_code = run();
+    System.exit(return_code);
+    return;
+  }
+
+}
diff --git a/developer/javac/Mosaic_Quantifier.java b/developer/javac/Mosaic_Quantifier.java
new file mode 100644
index 0000000..493b3c0
--- /dev/null
+++ b/developer/javac/Mosaic_Quantifier.java
@@ -0,0 +1,42 @@
+package com.ReasoningTechnology.Mosaic;
+
+import java.util.function.Predicate;
+
+public class Mosaic_Quantifier{
+
+  // Linear search with a predicate
+  public static <T> T find( T[] elements ,Predicate<T> predicate ){
+    for( T element : elements ){
+      if( predicate.test( element )) return element; // Return the first match
+    }
+    return null; // Return null if no element satisfies the predicate
+  }
+
+  // True when it does a search and finds a true value; otherwise false.
+  public static Boolean exists( Object[] elements ){
+    return elements.length > 0 && find( elements ,element -> (element instanceof Boolean) && (Boolean) element ) != null;
+  }
+
+  // True when it does a search and does not find a false value; otherwise false.
+  // Hence, all true for the empty set is false, which is appropriate for testing.
+  public static Boolean all( Object[] elements ){
+    return elements.length > 0 && find( elements ,element -> !(element instanceof Boolean) || !(Boolean) element ) == null;
+  }
+
+  public static void all_set_false( Boolean[] condition_list ){
+    int i = 0;
+    while(i < condition_list.length){
+      condition_list[i] = false;
+      i++;
+    }
+  }
+
+  public static void all_set_true( Boolean[] condition_list ){
+    int i = 0;
+    while(i < condition_list.length){
+      condition_list[i] = true;
+      i++;
+    }
+  }
+
+}
diff --git a/developer/javac/Mosaic_Testbench.java b/developer/javac/Mosaic_Testbench.java
new file mode 100644
index 0000000..777233f
--- /dev/null
+++ b/developer/javac/Mosaic_Testbench.java
@@ -0,0 +1,106 @@
+package com.ReasoningTechnology.Mosaic;
+
+import java.lang.reflect.Method;
+
+public class Mosaic_Testbench {
+
+  /* --------------------------------------------------------------------------------
+     Validate the structure of a test method
+  */
+  public static Boolean method_is_wellformed(Method method){
+    // Check if the method returns Boolean
+    if(!method.getReturnType().equals(Boolean.class)){
+      System.out.println("Structural problem: " + method.getName() + " does not return Boolean.");
+      return false;
+    }
+
+    // Check if the method has exactly one argument of type Mosaic_IO
+    Class<?>[] parameterTypes = method.getParameterTypes();
+    if(parameterTypes == null || parameterTypes.length != 1 || !parameterTypes[0].equals(Mosaic_IO.class)){
+      System.out.println("Structural problem: " + method.getName() + " does not accept a single Mosaic_IO argument.");
+      return false;
+    }
+
+    return true;
+  }
+
+  /* --------------------------------------------------------------------------------
+     Run a single test method
+  */
+  public static Boolean run_test(Object test_suite, Method method, Mosaic_IO io){
+    String test_name = method.getName();
+
+    // Tracking possible test failures
+    Boolean fail_malformed = false;
+    Boolean fail_reported = false;
+    Boolean fail_exception = false;
+    Boolean fail_extraneous_stdout = false;
+    Boolean fail_extraneous_stderr = false;
+    String exception_string = "";
+
+    // Validate method structure
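+    // (a well-formed test returns Boolean and takes a single Mosaic_IO
+    //  argument; see method_is_wellformed above)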
if(!method_is_wellformed(method)){ + System.out.println("Error: " + test_name + " has an invalid structure."); + return false; + } + + // Redirect I/O + Boolean successful_redirect = io.redirect(); + if(successful_redirect){ + io.clear_buffers(); // Start each test with empty buffers + } else { + Mosaic_Logger.message(test_name, "Error: I/O redirection failed before running the test."); + System.out.println("Warning: Failed to redirect I/O for test: " + test_name); + } + + // Run the test and catch any exceptions + try{ + Object result = method.invoke(test_suite, io); + fail_reported = !Boolean.TRUE.equals(result); // Test passes only if it returns exactly `true` + fail_extraneous_stdout = io.has_out_content(); + fail_extraneous_stderr = io.has_err_content(); + } catch(Exception e){ + fail_exception = true; + exception_string = e.toString(); + } finally{ + io.restore(); + } + + // Report results + if(fail_reported) System.out.println("Test failed: '" + test_name + "' reported failure."); + if(fail_exception) System.out.println("Test failed: '" + test_name + "' threw an exception: " + exception_string); + if(fail_extraneous_stdout){ + System.out.println("Test failed: '" + test_name + "' produced extraneous stdout."); + Mosaic_Logger.output(test_name, "stdout", io.get_out_content()); + } + if(fail_extraneous_stderr){ + System.out.println("Test failed: '" + test_name + "' produced extraneous stderr."); + Mosaic_Logger.output(test_name, "stderr", io.get_err_content()); + } + + // Determine final test result + return !(fail_reported || fail_exception || fail_extraneous_stdout || fail_extraneous_stderr); + } + + /* -------------------------------------------------------------------------------- + Run all tests in the test suite + */ + public static int run(Object test_suite){ + int failed_tests = 0; + int passed_tests = 0; + Method[] methods = test_suite.getClass().getDeclaredMethods(); + Mosaic_IO io = new Mosaic_IO(); + + for(Method method : methods){ + if(run_test(test_suite, method, io)) passed_tests++; else failed_tests++; + } + + // Summary of test results + System.out.println("Total tests run: " + (passed_tests + failed_tests)); + System.out.println("Total tests passed: " + passed_tests); + System.out.println("Total tests failed: " + failed_tests); + + return (failed_tests > 0) ? 
1 : 0; + } + +} diff --git a/developer/javac/Mosaic_Time.java b/developer/javac/Mosaic_Time.java new file mode 100644 index 0000000..6e30236 --- /dev/null +++ b/developer/javac/Mosaic_Time.java @@ -0,0 +1,13 @@ +package com.ReasoningTechnology.Mosaic; + +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; + +public class Mosaic_Time{ + + public static String iso_UTC_time(){ + return Instant.now().atOffset(ZoneOffset.UTC).format(DateTimeFormatter.ISO_INSTANT); + } + +} diff --git "a/developer/javac\360\237\226\211/Mosaic_Dispatcher.java" "b/developer/javac\360\237\226\211/Mosaic_Dispatcher.java" deleted file mode 100644 index b3b166e..0000000 --- "a/developer/javac\360\237\226\211/Mosaic_Dispatcher.java" +++ /dev/null @@ -1,693 +0,0 @@ -package com.ReasoningTechnology.Mosaic; - -import java.lang.invoke.MethodHandle; -import java.lang.invoke.MethodHandles; -import java.lang.invoke.MethodType; -import java.lang.reflect.Constructor; -import java.lang.reflect.Field; -import java.lang.reflect.Method; -import java.lang.reflect.Modifier; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Map; - - -/*-------------------------------------------------------------------------------- - Is a signature for a Method - - The envisioned use case is the 'method signature' -> handle map. - - Perhaps the existing method signature in the Reflection library can - replace this, but most of the work done here is the formatting done - in the constructors. -*/ - -class MethodSignature{ - // header - private Class return_type; - private String class_name; - private String method_name; - - // variable length parameter type list - private Class[] parameter_type_list; - - // field access and strings - // - public String get_class_name(){ - return class_name; - } - - public String get_method_name(){ - return method_name; - } - - public Class get_return_type(){ - return return_type; - } - - public Class[] get_parameter_type_list(){ - return parameter_type_list; - } - - public String to_string_return_type(){ - return get_return_type() != null ? get_return_type().getSimpleName() : "null"; - } - - public String to_string_parameter_type_list(){ - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < get_parameter_type_list().length; i++){ - sb.append(get_parameter_type_list()[i] != null ? get_parameter_type_list()[i].getSimpleName() : "null"); - if (i < get_parameter_type_list().length - 1) sb.append(" ,"); - } - return sb.toString(); - } - - public String to_string_signature_header(){ - return to_string_return_type() + " " + get_class_name() + "." + get_method_name(); - } - - // constructors - // - private void init_header - ( - Class return_type - ,String class_name - ,String method_name - ){ - this.return_type = return_type; - this.class_name = class_name; - this.method_name = method_name; - } - - // Signature when given a Method. - // Used when putting methods into the method signature to handle map. - public MethodSignature(Method method){ - init_header - ( - method.getReturnType() - ,method.getDeclaringClass().getName() - ,method.getName() - ); - this.parameter_type_list = method.getParameterTypes(); - } - - // Signature when given a parameter type list. - // Used putting constructors into the signature to handle map. 
- public MethodSignature - ( - Class return_type - ,String class_name - ,String method_name - ,Class[] parameter_type_list - ){ - init_header(return_type ,class_name ,method_name); - this.parameter_type_list = parameter_type_list; - } - - // Signature when given an argument value list. - // Used by `invoke`. - public MethodSignature - ( - Class return_type - ,String class_name - ,String method_name - ,Object[] arg_list - ){ - init_header(return_type ,class_name ,method_name); - - // Set the signature parameter type to the argument type. - // No automatic conversions are applied. - this.parameter_type_list = new Class[arg_list.length]; // Initialize the array - for(int i = 0; i < arg_list.length; i++){ - if(arg_list[i] instanceof Mosaic_IsPrimitive){ - parameter_type_list[i] =( (Mosaic_IsPrimitive) arg_list[i] ).get_type(); - } else if(arg_list[i] != null){ - parameter_type_list[i] = arg_list[i].getClass(); - } else{ - parameter_type_list[i] = null; - } - } - } - - // standard interface - // - @Override - public String toString(){ - return to_string_signature_header() + "(" + to_string_parameter_type_list() + ")"; - } - - @Override - public boolean equals(Object o){ - if(this == o) return true; - if(o == null) return false; - if(o.getClass() != MethodSignature.class) return false; - - MethodSignature signature = (MethodSignature) o; - - return - get_class_name().equals(signature.get_class_name()) - && get_method_name().equals(signature.get_method_name()) - && get_return_type().equals(signature.get_return_type()) - && Arrays.equals(get_parameter_type_list() ,signature.get_parameter_type_list()); - } - - @Override - public int hashCode(){ - int result = get_class_name().hashCode(); - result = 31 * result + get_method_name().hashCode(); - result = 31 * result + get_return_type().hashCode(); - result = 31 * result + Arrays.hashCode(get_parameter_type_list()); - return result; - } - -} - -/*-------------------------------------------------------------------------------- -This is a method signature to callable method handle dictionary. - -In the envisioned use case there is one such dictionary per -Dispatcher instance. 
- - */ -class MethodSignature_To_Handle_Map{ - - // Static test messaging - // - private static boolean test = false; - public static void test_switch(boolean test){ - if (MethodSignature_To_Handle_Map.test && !test){ - test_print("MethodSignature_To_Handle_Map:: test messages off"); - } - if (!MethodSignature_To_Handle_Map.test && test){ - test_print("MethodSignature_To_Handle_Map:: test messages on"); - } - MethodSignature_To_Handle_Map.test = test; - } - private static void test_print(String message){ - if(test){ - System.out.println(message); - } - } - - // instance data - // - private Map map; - - // field access and strings - // - - // constructors - // - public MethodSignature_To_Handle_Map(){ - map = new HashMap<>(); - } - - // methods for adding entries - // - public void add_class(Class class_metadata){ - test_print("MethodSignature_To_Handle_Map::add_class adding methods"); - add_methods(class_metadata); - - test_print("MethodSignature_To_Handle_Map::add_class adding constructors"); - add_constructors(class_metadata); - - test_print("MethodSignature_To_Handle_Map::add_class adding fields"); - // add_fields(class_metadata); - } - - private void add_entry(MethodSignature key ,MethodHandle value){ - test_print - ( - "(add_entry:: " + "(key " + key + ") " + "(value " + value + ")" + ")" - ); - map.put(key ,value); - } - - public void add_methods(Class class_metadata){ - try{ - MethodHandles.Lookup lookup = MethodHandles.lookup(); - MethodHandles.Lookup private_lookup = MethodHandles.privateLookupIn(class_metadata,lookup); - - for(Method method:class_metadata.getDeclaredMethods()){ - try{ - Class[] parameter_type_list=method.getParameterTypes(); - MethodSignature signature=new MethodSignature( - method.getReturnType(), - class_metadata.getName(), - method.getName(), - parameter_type_list - ); - - MethodType method_type=MethodType.methodType(method.getReturnType(),parameter_type_list); - MethodHandle method_handle; - - if((method.getModifiers() & Modifier.STATIC) != 0){ - method_handle = private_lookup.findStatic(class_metadata ,method.getName() ,method_type); - }else{ - method_handle = private_lookup.findSpecial(class_metadata ,method.getName() ,method_type ,class_metadata); - } - - add_entry(signature,method_handle); - - }catch(IllegalAccessException|NoSuchMethodException e){ - System.err.println - ( - "Mosaic_Dispatcher::add_methods unexpectedly failed to register method: " - + method.getName() + " in class: " + class_metadata.getName() - ); - e.printStackTrace(); - } - } - - }catch(IllegalAccessException e){ - System.err.println("Mosaic_Dispatcher::add_methods unexpectedly failed to initialize lookup for class: "+class_metadata.getName()); - e.printStackTrace(); - } - } - - public void add_constructors(Class class_metadata){ - try{ - - MethodHandles.Lookup lookup = MethodHandles.lookup(); - MethodHandles.Lookup private_lookup = MethodHandles.privateLookupIn(class_metadata ,lookup); - - for( Constructor constructor : class_metadata.getDeclaredConstructors() ){ - try{ - - Class[] parameter_type_list = constructor.getParameterTypes(); - MethodType method_type = MethodType.methodType(void.class ,parameter_type_list); - MethodHandle constructor_handle = private_lookup.findConstructor(class_metadata ,method_type); - - // Signature for constructors: with parameter types - MethodSignature signature = new MethodSignature - ( - void.class - ,class_metadata.getName() - ,"" - ,parameter_type_list - ); - add_entry(signature ,constructor_handle); - - 
}catch(IllegalAccessException|NoSuchMethodException e){ - System.err.println("Mosaic_Dispatcher::add_constructors unexpectedly failed to register constructor: " + class_metadata.getName()); - e.printStackTrace(); - } - } - - }catch(IllegalAccessException e){ - System.err.println("Mosaic_Dispatcher::add_constructors unexpectedly failed to initialize lookup for class: " + class_metadata.getName()); - e.printStackTrace(); - } - } - - public void add_fields(Class class_metadata){ - try{ - MethodHandles.Lookup lookup = MethodHandles.lookup(); - MethodHandles.Lookup private_lookup = MethodHandles.privateLookupIn(class_metadata ,lookup); - - for(Field field : class_metadata.getDeclaredFields()){ - try{ - // Field Metadata - String field_name = field.getName(); - Class field_type = field.getType(); - - // Create MethodHandle - MethodHandle read_handle = private_lookup.unreflectGetter(field); - MethodSignature read_signature = new MethodSignature - ( - field_type - ,class_metadata.getName() - ,"" - ,new Class[]{} - ); - add_entry(read_signature ,read_handle); - - // Create MethodHandle - MethodHandle write_handle = private_lookup.unreflectSetter(field); - MethodSignature write_signature = new MethodSignature - ( - void.class - ,class_metadata.getName() - ,"" - ,new Class[]{field_type} - ); - add_entry(write_signature ,write_handle); - - }catch(IllegalAccessException e){ - System.err.println("Mosaic_Dispatcher::add_fields unexpectedly failed to register field: " + field.getName()); - e.printStackTrace(); - } - } - }catch(IllegalAccessException e){ - System.err.println("Mosaic_Dispatcher::add_fields unexpectedly failed to initialize lookup for class: " + class_metadata.getName()); - e.printStackTrace(); - } - } - - - // methods for looking up handles - // - public MethodHandle lookup(MethodSignature s){ - return map.get(s); - } - - // standard interface - // - @Override - public String toString(){ - StringBuilder sb = new StringBuilder(); - sb.append("MethodSignature_To_Handle_Map:{").append(System.lineSeparator()); - - for(Map.Entry entry : map.entrySet()){ - sb.append(" ") - .append(entry.getKey().toString()) // MethodSignature's toString - .append(" -> ") - .append(entry.getValue().toString()) // MethodHandle's toString - .append(System.lineSeparator()); - } - - sb.append("}"); - return sb.toString(); - } - -} - -/*-------------------------------------------------------------------------------- - Given a class, dispatches calls to methods. - -*/ -public class Mosaic_Dispatcher{ - - // Static test messaging - // - private static boolean test = false; - public static void test_switch(boolean test){ - if(Mosaic_Dispatcher.test && !test){ - test_print("Mosaic_Dispatcher:: test messages off"); - } - if(!Mosaic_Dispatcher.test && test){ - test_print("Mosaic_Dispatcher:: test messages on"); - MethodSignature_To_Handle_Map.test_switch(true); - } - Mosaic_Dispatcher.test = test; - } - public static void test_print(String message){ - if(test){ - System.out.println(message); - } - } - - // instance data - // - private MethodSignature_To_Handle_Map map; - private Class target; - - - // field access and strings - // - public Class get_target(){ - return target; - } - - public MethodSignature_To_Handle_Map get_map(){ - return map; - } - - public String to_string_target(){ - return target != null ? 
target.getName() : "null"; - } - - // constructors - // - - // construct given the class metadata for the target class - public Mosaic_Dispatcher(Class target){ - this.map = new MethodSignature_To_Handle_Map(); - this.target = target; - test_print("Mosaic_Dispatcher:: mapping methods given class_metadata object: " + to_string_target()); - this.map.add_class(target); - } - - // Constructor accepting a fully qualified class name of the target class - public Mosaic_Dispatcher(String fully_qualified_class_name) throws ClassNotFoundException{ - this.map = new MethodSignature_To_Handle_Map(); - this.target = Class.forName(fully_qualified_class_name); - test_print("Mosaic_Dispatcher:: mapping methods from class specified by string: \"" + to_string_target() + "\""); - this.map.add_class(target); - } - - // methods unique to the class - // - public T read(String field_name){ - try{ - test_print("Call to Mosaic_Dispatcher::read( field_name )"); - - MethodHandles.Lookup lookup = MethodHandles.privateLookupIn(target ,MethodHandles.lookup()); - Field field = target.getDeclaredField(field_name); - MethodHandle handle = lookup.unreflectGetter(field); - return (T) handle.invoke(); - - }catch(NoSuchFieldException | IllegalAccessException e){ - System.out.println("Mosaic_Dispatcher::read of static exception:"); - e.printStackTrace(); - return null; - }catch(Throwable t){ - System.out.println("Mosaic_Dispatcher::read of static exception:"); - t.printStackTrace(); - return null; - } - } - - public T read(Object instance ,String field_name){ - try{ - test_print("Call to Mosaic_Dispatcher::read(instance ,field_name)"); - - MethodHandles.Lookup lookup = MethodHandles.privateLookupIn(target ,MethodHandles.lookup()); - Field field = target.getDeclaredField(field_name); - - if(instance == null || !target.isInstance(instance)){ - throw new IllegalArgumentException - ( - "Mosaic_Dispatcher::read provided instance is not of target type: " - + target.getName() - + ", but received: " - + (instance == null ? 
"null" : instance.getClass().getName()) - ); - } - MethodHandle handle = lookup.unreflectGetter(field); - return (T) handle.bindTo(instance).invoke(); - - }catch(NoSuchFieldException | IllegalAccessException e){ - System.out.println("Mosaic_Dispatcher::read exception:"); - e.printStackTrace(); - return null; - }catch(Throwable t){ - System.out.println("Mosaic_Dispatcher::read exception:"); - t.printStackTrace(); - return null; - } - } - - public void write(String field_name ,T value){ - try{ - test_print("Call to Mosaic_Dispatcher::write(field_name ,value)"); - - MethodHandles.Lookup lookup = MethodHandles.privateLookupIn(target ,MethodHandles.lookup()); - Field field = target.getDeclaredField(field_name); - MethodHandle handle = lookup.unreflectSetter(field); - handle.invoke(value); - - }catch(NoSuchFieldException | IllegalAccessException e){ - System.out.println("Mosaic_Dispatcher::write static field exception:"); - e.printStackTrace(); - }catch(Throwable t){ - System.out.println("Mosaic_Dispatcher::write static field exception:"); - t.printStackTrace(); - } - } - - public void write(Object instance ,String field_name ,T value){ - try{ - test_print("Call to Mosaic_Dispatcher::write(instance ,field_name ,value)"); - - MethodHandles.Lookup lookup = MethodHandles.privateLookupIn(target ,MethodHandles.lookup()); - Field field = target.getDeclaredField(field_name); - - if(instance == null || !target.isInstance(instance)){ - throw new IllegalArgumentException - ( - "Mosaic_Dispatcher::write provided instance is not of target type: " - + target.getName() - + ", but received: " - + (instance == null ? "null" : instance.getClass().getName()) - ); - } - MethodHandle handle = lookup.unreflectSetter(field); - handle.bindTo(instance).invoke(value); - - }catch(NoSuchFieldException | IllegalAccessException e){ - System.out.println("Mosaic_Dispatcher::write instance field exception:"); - e.printStackTrace(); - }catch(Throwable t){ - System.out.println("Mosaic_Dispatcher::write instance field exception:"); - t.printStackTrace(); - } - } - - @SuppressWarnings("unchecked") - public T make(Object... arg_list){ - test_print("Call to Mosaic_Dispatcher::make"); - - // Use dispatch_1 to invoke the constructor - Object result = dispatch_1( - null // no instance for constructor - ,void.class // return type for signature matching - ,"" // constructors are always named `` in Java - ,arg_list - ); - - // Cast the result to the target type - return (T) target.cast(result); - } - - // dispatch static methods - public T dispatch - ( - Class return_type - ,String method_name - ,Object... arg_list - ){ - test_print("Call to Mosaic_Dispatcher::dispatch for a static method."); - return dispatch_1 - ( - null // No instance for static methods - ,return_type // Return type - ,method_name // Method name - ,arg_list // Argument list - ); - } - - // dispatch instance binded methods - public T dispatch - ( - Object instance, - Class return_type, - String method_name, - Object... arg_list - ){ - test_print("Call to Mosaic_Dispatcher::dispatch for a method bound to an instance."); - if(instance == null || !target.isInstance(instance)){ - throw new IllegalArgumentException - ( - "Provided instance is not of target type: " - + target.getName() - + ", but received: " - + (instance == null ? 
"null" : instance.getClass().getName()) - ); - } - return dispatch_1(instance ,return_type ,method_name ,arg_list); - } - - @SuppressWarnings("unchecked") - private T dispatch_1( - Object instance, - Class return_type, - String method_name, - Object... arg_list - ){ - try{ - if(arg_list == null){ - arg_list = new Object[0]; // Treat null as an empty argument list - } - - // Resolve method/constructor signature - MethodSignature signature = new MethodSignature( - return_type, - to_string_target(), - method_name, - arg_list - ); - test_print("dispatch_1:: signature key:" + signature.toString()); - - MethodHandle handle = map.lookup(signature); - - if(handle == null){ - throw new NoSuchMethodException("No method or constructor found for signature: " + signature.toString()); - } - - // Strip off any IsPrimitive tags - Object[] untagged_arg_list = new Object[arg_list.length]; - for(int i = 0; i < arg_list.length; i++){ - if(arg_list[i] instanceof Mosaic_IsPrimitive){ - untagged_arg_list[i] = ((Mosaic_IsPrimitive) arg_list[i]).get_value(); - }else{ - untagged_arg_list[i] = arg_list[i]; - } - } - - // call the Handle and cast the result - // - if("".equals(method_name)){ - // Constructor invocation - return (T) target.cast(handle.invokeWithArguments(untagged_arg_list)); - } - - if(return_type == void.class || return_type == null){ - if(instance == null){ - // static method call - handle.invokeWithArguments(untagged_arg_list); - }else{ - // method bound to instance call - handle.bindTo(instance).invokeWithArguments(untagged_arg_list); - } - return null; // generic code void return type must return null - } - - Object result; - if(instance == null){ - // static method call - result = handle.invokeWithArguments(untagged_arg_list); - }else{ - // method bound to instance call - result = handle.bindTo(instance).invokeWithArguments(untagged_arg_list); // Instance method - } - - if(result == null) return null; - - // Handle primitive return types explicitly - if(return_type.isPrimitive()){ - if(return_type == boolean.class) return(T)(Boolean) result; - if(return_type == int.class) return(T)(Integer) result; - if(return_type == double.class) return(T)(Double) result; - if(return_type == float.class) return(T)(Float) result; - if(return_type == long.class) return(T)(Long) result; - if(return_type == short.class) return(T)(Short) result; - if(return_type == byte.class) return(T)(Byte) result; - if(return_type == char.class) return(T)(Character) result; - } - - // For non-primitives, cast normally - return return_type.cast(result); - - }catch(Throwable t){ - System.out.println("Mosaic_Dispatcher::dispatch exception:"); - t.printStackTrace(); - return null; - } - } - - // standard interface - // - @Override - public String toString(){ - return - "Mosaic_Dispatcher{" - + "target=" - + to_string_target() - + " ,map=" - + map.toString() - + "}" - ; - } - -} diff --git "a/developer/javac\360\237\226\211/Mosaic_IO.java" "b/developer/javac\360\237\226\211/Mosaic_IO.java" deleted file mode 100644 index fe6bdff..0000000 --- "a/developer/javac\360\237\226\211/Mosaic_IO.java" +++ /dev/null @@ -1,144 +0,0 @@ -package com.ReasoningTechnology.Mosaic; -/* - The primary purpose of this class is to redirect I/O to buffers, - sot that a test can check the I/O behavior of a function under test. 
-*/ - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.FileOutputStream; -import java.io.FileInputStream; -import java.io.FileDescriptor; -import java.io.PrintStream; -import java.io.InputStream; - -public class Mosaic_IO{ - - private PrintStream original_out; - private PrintStream original_err; - private InputStream original_in; - - private ByteArrayOutputStream out_content; - private ByteArrayOutputStream err_content; - private ByteArrayInputStream in_content; - private Boolean streams_foobar = false; - private Boolean uninitialized = true; - - - // IO currently has no constructors defined, uses default - - - // Redirects IO streams, logs and handles errors if redirection fails. - // - // Most tests do not do I/O checks, so rather than throwing an error - // it will set the streams_foobar flag, then throw an error if the I/O - // functions are used. - // - // This is the only method that can set the streams_foobar flag. - public Boolean redirect(){ - - try{ - original_out = System.out; - original_err = System.err; - original_in = System.in; - - out_content = new ByteArrayOutputStream(); - err_content = new ByteArrayOutputStream(); - in_content = new ByteArrayInputStream(new byte[0]); - - System.setOut( new PrintStream(out_content) ); - System.setErr( new PrintStream(err_content) ); - System.setIn(in_content); - - uninitialized = false; - return true; - - } catch(Exception e){ - restore_hard(); - streams_foobar = true; - return false; - - } - } - - // Hard restore of the streams, resetting to system defaults - public void restore_hard(){ - System.setOut(new PrintStream( new FileOutputStream(FileDescriptor.out)) ); - System.setErr(new PrintStream( new FileOutputStream(FileDescriptor.err))) ; - System.setIn(new FileInputStream(FileDescriptor.in)); - } - - // Restores original IO streams, ensuring foobar and uninitialized states are checked. - // If anything goes wrong reverse to restore_hard. - public void restore(){ - if(uninitialized || streams_foobar){ - restore_hard(); - return; - } - try{ - System.setOut(original_out); - System.setErr(original_err); - System.setIn(original_in); - } catch(Throwable e){ - restore_hard(); - } - } - - // Clears output, error, and input buffers, checks for foobar state only. - public void clear_buffers(){ - if(streams_foobar){ - throw new IllegalStateException("Cannot clear buffers: IO object is in foobar state."); - } - out_content.reset(); - err_content.reset(); - in_content = new ByteArrayInputStream( new byte[0] ); // Reset to EOF - System.setIn(in_content); - } - - public Boolean has_out_content(){ - if(streams_foobar){ - throw new IllegalStateException - ( - "Cannot access stdout content: IO object is in foobar state." - ); - } - return out_content.size() > 0; - } - public String get_out_content(){ - if(streams_foobar){ - throw new IllegalStateException - ( - "Cannot access stdout content: IO object is in foobar state." - ); - } - return out_content.toString(); - } - - public Boolean has_err_content(){ - if(streams_foobar){ - throw new IllegalStateException - ( - "Cannot access stderr content: IO object is in foobar state." - ); - } - return err_content.size() > 0; - } - public String get_err_content(){ - if(streams_foobar){ - throw new IllegalStateException - ( - "Cannot access stderr content: IO object is in foobar state." - ); - } - return err_content.toString(); - } - - // Pushes input string onto stdin, checks foobar state only. 
- public void push_input(String input_data){ - if(streams_foobar){ - throw new IllegalStateException("Cannot push input: IO object is in foobar state."); - } - in_content = new ByteArrayInputStream( input_data.getBytes() ); - System.setIn(in_content); - } -} diff --git "a/developer/javac\360\237\226\211/Mosaic_IsPrimitive.java" "b/developer/javac\360\237\226\211/Mosaic_IsPrimitive.java" deleted file mode 100644 index e0d6652..0000000 --- "a/developer/javac\360\237\226\211/Mosaic_IsPrimitive.java" +++ /dev/null @@ -1,32 +0,0 @@ -package com.ReasoningTechnology.Mosaic; - -public class Mosaic_IsPrimitive { - private final Object value; - - public Mosaic_IsPrimitive(Object value){ - this.value = value; - } - - public static Mosaic_IsPrimitive make(Object value) { - return new Mosaic_IsPrimitive(value); - } - - public Object get_value(){ - return value; - } - - public Class get_type(){ - if( value == null ) return null; - if( value instanceof Integer ) return int.class; - if( value instanceof Boolean ) return boolean.class; - if( value instanceof Double ) return double.class; - if( value instanceof Float ) return float.class; - if( value instanceof Long ) return long.class; - if( value instanceof Short ) return short.class; - if( value instanceof Byte ) return byte.class; - if( value instanceof Character ) return char.class; - return value.getClass(); - } - -} - diff --git "a/developer/javac\360\237\226\211/Mosaic_Logger.java" "b/developer/javac\360\237\226\211/Mosaic_Logger.java" deleted file mode 100644 index 1abb5c1..0000000 --- "a/developer/javac\360\237\226\211/Mosaic_Logger.java" +++ /dev/null @@ -1,52 +0,0 @@ -package com.ReasoningTechnology.Mosaic; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class Mosaic_Logger{ - - private static final Logger LOGGER = LoggerFactory.getLogger(Mosaic_Logger.class); - - // Formats and logs an output related to a specific test - public static void output(String test_name, String stream, String output_data){ - String timestamp = Mosaic_Time.iso_UTC_time(); - String formatted_log = String.format( - "\n%s -----------------------------------------------------------\n" + - "Test: %s\n" + - "Stream: %s\n" + - "Output:\n%s\n", - timestamp, test_name, stream, output_data - ); - - LOGGER.info(formatted_log); - } - - // Logs a general message for a test - public static void message(String test_name, String message){ - String timestamp = Mosaic_Time.iso_UTC_time(); - String formatted_log = String.format( - "\n%s -----------------------------------------------------------\n" + - "Test: %s\n" + - "Message:\n%s\n", - timestamp, test_name, message - ); - - LOGGER.info(formatted_log); - } - - public static void error(String test_name, String message, Throwable error){ - String timestamp = Mosaic_Time.iso_UTC_time(); - String formatted_log = String.format( - "\n%s -----------------------------------------------------------\n" + - "Test: %s\n" + - "Message:\n%s\n" + - "Error:\n", - timestamp, test_name, message - ); - - // Pass the Throwable 'error' as the last argument to LOGGER.error. - // This automatically logs the stack trace at the ERROR level. 
- LOGGER.error(formatted_log, error); - } - -} diff --git "a/developer/javac\360\237\226\211/Mosaic_Mosaic.java" "b/developer/javac\360\237\226\211/Mosaic_Mosaic.java" deleted file mode 100644 index 51e57d0..0000000 --- "a/developer/javac\360\237\226\211/Mosaic_Mosaic.java" +++ /dev/null @@ -1,27 +0,0 @@ -package com.ReasoningTechnology.Mosaic; - -/* -The Mosaic shell callable wrapper is currently a placeholder. Perhaps someday we -can find something for this to do. - -*/ - - -public class Mosaic_Mosaic{ - - public static Boolean test_is_true(){ - return true; - } - - public static int run(){ - System.out.println("Main function placeholder. Currently Mosaic is used by extending the TestBench class."); - return 0; - } - - public static void main(String[] args){ - int return_code = run(); - System.exit(return_code); - return; - } - -} diff --git "a/developer/javac\360\237\226\211/Mosaic_Quantifier.java" "b/developer/javac\360\237\226\211/Mosaic_Quantifier.java" deleted file mode 100644 index 493b3c0..0000000 --- "a/developer/javac\360\237\226\211/Mosaic_Quantifier.java" +++ /dev/null @@ -1,42 +0,0 @@ -package com.ReasoningTechnology.Mosaic; - -import java.util.function.Predicate; - -public class Mosaic_Quantifier{ - - // Linear search with a predicate - public static T find( T[] elements ,Predicate predicate ){ - for( T element : elements ){ - if( predicate.test( element )) return element; // Return the first match - } - return null; // Return null if no element satisfies the predicate - } - - // True when it does a search and finds a true value; otherwise false. - public static Boolean exists( Object[] elements ){ - return elements.length > 0 && find( elements ,element -> (element instanceof Boolean) && (Boolean) element ) != null; - } - - // True when it does a search and does not find a false value; otherwise false. - // Hence, all true for the empty set is false, which is appropriate for testing. 
- public static Boolean all( Object[] elements ){ - return elements.length > 0 && find( elements ,element -> !(element instanceof Boolean) || !(Boolean) element ) == null; - } - - public static void all_set_false( Boolean[] condition_list ){ - int i = 0; - while(i < condition_list.length){ - condition_list[i] = false; - i++; - } - } - - public static void all_set_true( Boolean[] condition_list ){ - int i = 0; - while(i < condition_list.length){ - condition_list[i] = true; - i++; - } - } - -} diff --git "a/developer/javac\360\237\226\211/Mosaic_Testbench.java" "b/developer/javac\360\237\226\211/Mosaic_Testbench.java" deleted file mode 100644 index 777233f..0000000 --- "a/developer/javac\360\237\226\211/Mosaic_Testbench.java" +++ /dev/null @@ -1,106 +0,0 @@ -package com.ReasoningTechnology.Mosaic; - -import java.lang.reflect.Method; - -public class Mosaic_Testbench { - - /* -------------------------------------------------------------------------------- - Validate the structure of a test method - */ - public static Boolean method_is_wellformed(Method method){ - // Check if the method returns Boolean - if(!method.getReturnType().equals(Boolean.class)){ - System.out.println("Structural problem: " + method.getName() + " does not return Boolean."); - return false; - } - - // Check if the method has exactly one argument of type Mosaic_IO - Class[] parameterTypes = method.getParameterTypes(); - if(parameterTypes == null || parameterTypes.length != 1 || !parameterTypes[0].equals(Mosaic_IO.class)){ - System.out.println("Structural problem: " + method.getName() + " does not accept a single Mosaic_IO argument."); - return false; - } - - return true; - } - - /* -------------------------------------------------------------------------------- - Run a single test method - */ - public static Boolean run_test(Object test_suite, Method method, Mosaic_IO io){ - String test_name = method.getName(); - - // Tracking possible test failures - Boolean fail_malformed = false; - Boolean fail_reported = false; - Boolean fail_exception = false; - Boolean fail_extraneous_stdout = false; - Boolean fail_extraneous_stderr = false; - String exception_string = ""; - - // Validate method structure - if(!method_is_wellformed(method)){ - System.out.println("Error: " + test_name + " has an invalid structure."); - return false; - } - - // Redirect I/O - Boolean successful_redirect = io.redirect(); - if(successful_redirect){ - io.clear_buffers(); // Start each test with empty buffers - } else { - Mosaic_Logger.message(test_name, "Error: I/O redirection failed before running the test."); - System.out.println("Warning: Failed to redirect I/O for test: " + test_name); - } - - // Run the test and catch any exceptions - try{ - Object result = method.invoke(test_suite, io); - fail_reported = !Boolean.TRUE.equals(result); // Test passes only if it returns exactly `true` - fail_extraneous_stdout = io.has_out_content(); - fail_extraneous_stderr = io.has_err_content(); - } catch(Exception e){ - fail_exception = true; - exception_string = e.toString(); - } finally{ - io.restore(); - } - - // Report results - if(fail_reported) System.out.println("Test failed: '" + test_name + "' reported failure."); - if(fail_exception) System.out.println("Test failed: '" + test_name + "' threw an exception: " + exception_string); - if(fail_extraneous_stdout){ - System.out.println("Test failed: '" + test_name + "' produced extraneous stdout."); - Mosaic_Logger.output(test_name, "stdout", io.get_out_content()); - } - if(fail_extraneous_stderr){ - 
System.out.println("Test failed: '" + test_name + "' produced extraneous stderr."); - Mosaic_Logger.output(test_name, "stderr", io.get_err_content()); - } - - // Determine final test result - return !(fail_reported || fail_exception || fail_extraneous_stdout || fail_extraneous_stderr); - } - - /* -------------------------------------------------------------------------------- - Run all tests in the test suite - */ - public static int run(Object test_suite){ - int failed_tests = 0; - int passed_tests = 0; - Method[] methods = test_suite.getClass().getDeclaredMethods(); - Mosaic_IO io = new Mosaic_IO(); - - for(Method method : methods){ - if(run_test(test_suite, method, io)) passed_tests++; else failed_tests++; - } - - // Summary of test results - System.out.println("Total tests run: " + (passed_tests + failed_tests)); - System.out.println("Total tests passed: " + passed_tests); - System.out.println("Total tests failed: " + failed_tests); - - return (failed_tests > 0) ? 1 : 0; - } - -} diff --git "a/developer/javac\360\237\226\211/Mosaic_Time.java" "b/developer/javac\360\237\226\211/Mosaic_Time.java" deleted file mode 100644 index 6e30236..0000000 --- "a/developer/javac\360\237\226\211/Mosaic_Time.java" +++ /dev/null @@ -1,13 +0,0 @@ -package com.ReasoningTechnology.Mosaic; - -import java.time.Instant; -import java.time.ZoneOffset; -import java.time.format.DateTimeFormatter; - -public class Mosaic_Time{ - - public static String iso_UTC_time(){ - return Instant.now().atOffset(ZoneOffset.UTC).format(DateTimeFormatter.ISO_INSTANT); - } - -} diff --git a/developer/tool/bash_wrapper_list b/developer/tool/bash_wrapper_list new file mode 100755 index 0000000..7f07215 --- /dev/null +++ b/developer/tool/bash_wrapper_list @@ -0,0 +1,15 @@ +#!/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") + +# input guards + + env_must_be="developer/tool🖉/env" + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + exit 1 + fi + + cd "$REPO_HOME"/developer + +# list of classes that have main calls and get bash wrappers +echo Mosaic diff --git a/developer/tool/clean b/developer/tool/clean new file mode 100755 index 0000000..a32578f --- /dev/null +++ b/developer/tool/clean @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") + +# Removes all files found in the build directories. It asks no questions as to +# how or why the files got there. Be especially careful with the 'bash' +# directory if you have authored scripts for release, add a `bash🖉` +# directory instead of putting them in `bash`. + +# input guards + env_must_be="developer/tool🖉/env" + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + exit 1 + fi + +# remove files + set -x + cd "$REPO_HOME"/developer + + # rm_na currently does not handle links correctly + rm -r scratchpad/* + + rm_na jvm/* + rm_na bash/* + set +x + +echo "$(script_fn) done." 
+ diff --git a/developer/tool/env b/developer/tool/env new file mode 100644 index 0000000..48704b0 --- /dev/null +++ b/developer/tool/env @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") + +# input guards + + env_must_be="tool_shared/bespoke🖉/env" + error_bad_env=false + error_not_sourced=false + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + error_bad_env=true + fi + if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then + echo "$script_afp:: This script must be sourced, not executed." + error_not_sourced=true + fi + if $error_not_sourced; then exit 1; fi + if $error_bad_env; then return 1; fi + +# so we can do the build + +export PATH=\ +"$REPO_HOME"/developer/tool🖉/\ +:"$JAVA_HOME"/bin\ +:"$PATH" + + +# Developed sources always come from javac🖉. +# Everything else comes from scratchpad. +# Run `make` to compile sources to the scratchpad +# Run `release` to put the third party tools (that are included with the release) on the scratchpad. +# +export CLASSPATH=\ +"$JAVA_HOME"/lib\ +:"$REPO_HOME"/developer/log\ +:"$REPO_HOME"/developer/scratchpad\ +:$LOGGER_FACADE\ +:$LOGGER_CLASSIC\ +:$LOGGER_CORE\ +:"$CLASSPATH" + +export SOURCEPATH=\ +"$REPO_HOME"/developer/javac🖉/\ +:"$SOURCEPATH" + +export PATH=\ +"$REPO_HOME"/developer/bash\ +:"$PATH" + +# misc + + # make .githolder and .gitignore visible + alias ls="ls -a" + +# some feedback to show all went well + + export PROMPT_DECOR="$PROJECT"_developer + export ENV=$(script_fp) + echo ENV "$ENV" + cd "$REPO_HOME"/developer/ + + + diff --git a/developer/tool/gather_source_links b/developer/tool/gather_source_links new file mode 100755 index 0000000..689db3b --- /dev/null +++ b/developer/tool/gather_source_links @@ -0,0 +1,32 @@ +#!/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") + +# This script links the sources into the scratchpad directory tree according to the package directory hiearchy. + +# Input guards + + env_must_be="developer/tool🖉/env" + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + exit 1 + fi + + cd "$REPO_HOME"/developer + +# Link sources into the package tree + + package_tree="scratchpad/com/ReasoningTechnology/$PROJECT" + mkdir -p "$package_tree" + echo "Package: $package_tree" + + echo -n "Linking:" + for source_file in javac🖉/*.java; do + echo -n " $(basename "$source_file")" + link_target="$package_tree/$(basename "$source_file")" + if [ ! -L "$link_target" ]; then + ln -s "$(realpath --relative-to="$package_tree" "$source_file")" "$link_target" + fi + done + echo "." + +echo "$(script_fp) done." diff --git a/developer/tool/gather_third_party b/developer/tool/gather_third_party new file mode 100755 index 0000000..1e81ace --- /dev/null +++ b/developer/tool/gather_third_party @@ -0,0 +1,42 @@ +#!/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") + +# This script expands Mosaic third party projects onto the scratchpad. This is done before releasing or running local ad hoc tests, so that the third party tools will be present. I.e. this is for creating a 'fat' jar. + +# Input guards + + env_must_be="developer/tool🖉/env" + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + exit 1 + fi + + cd "$REPO_HOME"/developer + +# Expand the third party tools into the package tree + + echo "Expanding .jar files to be included with Mosaic into scratchpad." 
+ + third_party_jars=( + "$LOGGER_FACADE" + "$LOGGER_CLASSIC" + "$LOGGER_CORE" + ) + + pushd scratchpad >& /dev/null + + for jar in "${third_party_jars[@]}"; do + if [ -f "$jar" ]; then + echo "including $jar" + jar -xf "$jar" + else + echo "Warning: JAR file not found: $jar" + fi + done + + # we are not currently using modules + rm -rf module-info.class + + + +echo "$(script_fp) done." diff --git a/developer/tool/make b/developer/tool/make new file mode 100755 index 0000000..df1ed8e --- /dev/null +++ b/developer/tool/make @@ -0,0 +1,36 @@ +#!/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") + +# input guards + + env_must_be="developer/tool🖉/env" + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: error: must be run in the $env_must_be environment" + exit 1 + fi + + cd "$REPO_HOME"/developer + +echo "Compiling files..." + set -x + javac -Xlint:deprecation -g -d scratchpad javac🖉/*.java + set +x + if [ $? -ne 0 ]; then + echo "Compilation failed." + exit 1 + fi + +echo "Creating bash wrappers..." + mkdir -p bash + # wrapper is a space separated list + wrapper=$(bash_wrapper_list) + for file in $wrapper;do + cat > bash/$file << EOL +#!/bin/bash +java com.ReasoningTechnology."$PROJECT".$file +EOL + chmod +x bash/$file + done + +echo "$(script_fp) done." + diff --git a/developer/tool/release b/developer/tool/release new file mode 100755 index 0000000..c8c32fe --- /dev/null +++ b/developer/tool/release @@ -0,0 +1,81 @@ +#!/usr/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") + +# before running this script, gather everything needed for the release on the scratchpad + +# input guards + + if [ -z "$REPO_HOME" ]; then + echo "$(script_fp):: REPO_HOME is not set." + exit 1 + fi + + env_must_be="developer/tool🖉/env" + if [ "$ENV" != "$env_must_be" ]; then + echo "$(script_fp):: must be run in the $env_must_be environment" + exit 1 + fi + + cd "$REPO_HOME"/developer + + if [ ! -d scratchpad ]; then + echo "$(script_fp):: no scratchpad directory" + exit 1 + fi + + +# Inform the user + + echo "The pwd for this script is `pwd`." + +# Function to copy and set permissions + + install_file() { + source_fp="$1" + target_dp="$2" + perms="$3" + + target_file="$target_dp/$(basename "$source_fp")" + + if [ ! -f "$source_fp" ]; then + echo "install_file:: Source file '$source_fp' does not exist." + return 1 + fi + + if ! install -m "$perms" "$source_fp" "$target_file"; then + echo "Error: Failed to install $(basename "$source_fp") to $target_dp" + exit 1 + else + echo "Installed $(basename "$source_fp") to $target_dp with permissions $perms" + fi + } + +# scratchpad --> .jar file + + mkdir -p jvm + jar_file=$(realpath jvm/"$PROJECT".jar) + + pushd scratchpad + + echo "scratchpad -> $jar_file" + jar cf $jar_file * + if [ $? -ne 0 ]; then + echo "Failed to create $jar_file file." + exit 1 + fi + + popd + +# move files to the release dir + + release_dir="$REPO_HOME/release" + bash_dir="$REPO_HOME/developer/bash" + wrapper=$(bash_wrapper_list) + + install_file "$jar_file" "$release_dir" "ug+r" + + for wrapper in $wrapper; do + install_file "$bash_dir"/"$wrapper" "$release_dir" "ug+r+x" + done + +echo "$(script_fp) done." 
diff --git "a/developer/tool\360\237\226\211/bash_wrapper_list" "b/developer/tool\360\237\226\211/bash_wrapper_list" deleted file mode 100755 index 7f07215..0000000 --- "a/developer/tool\360\237\226\211/bash_wrapper_list" +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# input guards - - env_must_be="developer/tool🖉/env" - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 - fi - - cd "$REPO_HOME"/developer - -# list of classes that have main calls and get bash wrappers -echo Mosaic diff --git "a/developer/tool\360\237\226\211/clean" "b/developer/tool\360\237\226\211/clean" deleted file mode 100755 index a32578f..0000000 --- "a/developer/tool\360\237\226\211/clean" +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# Removes all files found in the build directories. It asks no questions as to -# how or why the files got there. Be especially careful with the 'bash' -# directory if you have authored scripts for release, add a `bash🖉` -# directory instead of putting them in `bash`. - -# input guards - env_must_be="developer/tool🖉/env" - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 - fi - -# remove files - set -x - cd "$REPO_HOME"/developer - - # rm_na currently does not handle links correctly - rm -r scratchpad/* - - rm_na jvm/* - rm_na bash/* - set +x - -echo "$(script_fn) done." - diff --git "a/developer/tool\360\237\226\211/env" "b/developer/tool\360\237\226\211/env" deleted file mode 100644 index 48704b0..0000000 --- "a/developer/tool\360\237\226\211/env" +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# input guards - - env_must_be="tool_shared/bespoke🖉/env" - error_bad_env=false - error_not_sourced=false - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - error_bad_env=true - fi - if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then - echo "$script_afp:: This script must be sourced, not executed." - error_not_sourced=true - fi - if $error_not_sourced; then exit 1; fi - if $error_bad_env; then return 1; fi - -# so we can do the build - -export PATH=\ -"$REPO_HOME"/developer/tool🖉/\ -:"$JAVA_HOME"/bin\ -:"$PATH" - - -# Developed sources always come from javac🖉. -# Everything else comes from scratchpad. -# Run `make` to compile sources to the scratchpad -# Run `release` to put the third party tools (that are included with the release) on the scratchpad. 
-# -export CLASSPATH=\ -"$JAVA_HOME"/lib\ -:"$REPO_HOME"/developer/log\ -:"$REPO_HOME"/developer/scratchpad\ -:$LOGGER_FACADE\ -:$LOGGER_CLASSIC\ -:$LOGGER_CORE\ -:"$CLASSPATH" - -export SOURCEPATH=\ -"$REPO_HOME"/developer/javac🖉/\ -:"$SOURCEPATH" - -export PATH=\ -"$REPO_HOME"/developer/bash\ -:"$PATH" - -# misc - - # make .githolder and .gitignore visible - alias ls="ls -a" - -# some feedback to show all went well - - export PROMPT_DECOR="$PROJECT"_developer - export ENV=$(script_fp) - echo ENV "$ENV" - cd "$REPO_HOME"/developer/ - - - diff --git "a/developer/tool\360\237\226\211/gather_source_links" "b/developer/tool\360\237\226\211/gather_source_links" deleted file mode 100755 index 689db3b..0000000 --- "a/developer/tool\360\237\226\211/gather_source_links" +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# This script links the sources into the scratchpad directory tree according to the package directory hiearchy. - -# Input guards - - env_must_be="developer/tool🖉/env" - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 - fi - - cd "$REPO_HOME"/developer - -# Link sources into the package tree - - package_tree="scratchpad/com/ReasoningTechnology/$PROJECT" - mkdir -p "$package_tree" - echo "Package: $package_tree" - - echo -n "Linking:" - for source_file in javac🖉/*.java; do - echo -n " $(basename "$source_file")" - link_target="$package_tree/$(basename "$source_file")" - if [ ! -L "$link_target" ]; then - ln -s "$(realpath --relative-to="$package_tree" "$source_file")" "$link_target" - fi - done - echo "." - -echo "$(script_fp) done." diff --git "a/developer/tool\360\237\226\211/gather_third_party" "b/developer/tool\360\237\226\211/gather_third_party" deleted file mode 100755 index 1e81ace..0000000 --- "a/developer/tool\360\237\226\211/gather_third_party" +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# This script expands Mosaic third party projects onto the scratchpad. This is done before releasing or running local ad hoc tests, so that the third party tools will be present. I.e. this is for creating a 'fat' jar. - -# Input guards - - env_must_be="developer/tool🖉/env" - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 - fi - - cd "$REPO_HOME"/developer - -# Expand the third party tools into the package tree - - echo "Expanding .jar files to be included with Mosaic into scratchpad." - - third_party_jars=( - "$LOGGER_FACADE" - "$LOGGER_CLASSIC" - "$LOGGER_CORE" - ) - - pushd scratchpad >& /dev/null - - for jar in "${third_party_jars[@]}"; do - if [ -f "$jar" ]; then - echo "including $jar" - jar -xf "$jar" - else - echo "Warning: JAR file not found: $jar" - fi - done - - # we are not currently using modules - rm -rf module-info.class - - - -echo "$(script_fp) done." diff --git "a/developer/tool\360\237\226\211/make" "b/developer/tool\360\237\226\211/make" deleted file mode 100755 index df1ed8e..0000000 --- "a/developer/tool\360\237\226\211/make" +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# input guards - - env_must_be="developer/tool🖉/env" - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 - fi - - cd "$REPO_HOME"/developer - -echo "Compiling files..." 
- set -x - javac -Xlint:deprecation -g -d scratchpad javac🖉/*.java - set +x - if [ $? -ne 0 ]; then - echo "Compilation failed." - exit 1 - fi - -echo "Creating bash wrappers..." - mkdir -p bash - # wrapper is a space separated list - wrapper=$(bash_wrapper_list) - for file in $wrapper;do - cat > bash/$file << EOL -#!/bin/bash -java com.ReasoningTechnology."$PROJECT".$file -EOL - chmod +x bash/$file - done - -echo "$(script_fp) done." - diff --git "a/developer/tool\360\237\226\211/release" "b/developer/tool\360\237\226\211/release" deleted file mode 100755 index c8c32fe..0000000 --- "a/developer/tool\360\237\226\211/release" +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# before running this script, gather everything needed for the release on the scratchpad - -# input guards - - if [ -z "$REPO_HOME" ]; then - echo "$(script_fp):: REPO_HOME is not set." - exit 1 - fi - - env_must_be="developer/tool🖉/env" - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: must be run in the $env_must_be environment" - exit 1 - fi - - cd "$REPO_HOME"/developer - - if [ ! -d scratchpad ]; then - echo "$(script_fp):: no scratchpad directory" - exit 1 - fi - - -# Inform the user - - echo "The pwd for this script is `pwd`." - -# Function to copy and set permissions - - install_file() { - source_fp="$1" - target_dp="$2" - perms="$3" - - target_file="$target_dp/$(basename "$source_fp")" - - if [ ! -f "$source_fp" ]; then - echo "install_file:: Source file '$source_fp' does not exist." - return 1 - fi - - if ! install -m "$perms" "$source_fp" "$target_file"; then - echo "Error: Failed to install $(basename "$source_fp") to $target_dp" - exit 1 - else - echo "Installed $(basename "$source_fp") to $target_dp with permissions $perms" - fi - } - -# scratchpad --> .jar file - - mkdir -p jvm - jar_file=$(realpath jvm/"$PROJECT".jar) - - pushd scratchpad - - echo "scratchpad -> $jar_file" - jar cf $jar_file * - if [ $? -ne 0 ]; then - echo "Failed to create $jar_file file." - exit 1 - fi - - popd - -# move files to the release dir - - release_dir="$REPO_HOME/release" - bash_dir="$REPO_HOME/developer/bash" - wrapper=$(bash_wrapper_list) - - install_file "$jar_file" "$release_dir" "ug+r" - - for wrapper in $wrapper; do - install_file "$bash_dir"/"$wrapper" "$release_dir" "ug+r+x" - done - -echo "$(script_fp) done." diff --git a/document/An_Introduction_to_Structured_Testing.html b/document/An_Introduction_to_Structured_Testing.html new file mode 100644 index 0000000..384beb2 --- /dev/null +++ b/document/An_Introduction_to_Structured_Testing.html @@ -0,0 +1,1034 @@ + + + + + + + White Box Testing - Mosaic Project + + + +

An Introduction to Structured Testing


© 2024 Thomas Walker Lynch - All Rights Reserved.


Introduction


This guide provides a general overview of testing concepts. It is not a reference manual for the Mosaic Testbench itself. At the time of writing, no such reference document exists, so developers and testers are advised to consult the source code directly for implementation details. A small example can be found in the Test_MockClass file within the tester directory. Other examples can be found in projects that make use of Mosaic.


A typical testing setup comprises three main components: the Testbench, the test routines, and a collection of units under test (UUTs). Here, a UUT is any individual software or hardware component intended for testing. Because this guide focuses on software, we use the term RUT (routine under test) to denote the unit under test in software contexts. Although we use software-centric terminology, the principles outlined here apply equally to hardware testing.


Each test routine supplies inputs to a RUT, collects the resulting outputs, and determines whether the test passes or fails based on those values. A given test routine might repeat this procedure for any number of test cases. The final result from the test routine is then relayed to the Testbench. Testers and developers write the test routines and place them into the Testbench.


Mosaic is a Testbench. It serves as a structured environment for organizing and executing test routines, and it provides a library of utility routines for assisting the test writer. When run, the Testbench sequences through the set of test routines, one by one, providing each test routine with an interface to control and examine standard input and output. Each test routine, depending on its design, might in turn sequence through test cases. During execution, the Testbench records pass/fail results, lists the names of the test routines that failed, and generates a summary report with pass/fail totals.
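
To make the shape of a test routine concrete, below is a minimal sketch in the style expected by the Testbench source in this repository: a test routine is a public method that accepts a single Mosaic_IO argument and returns Boolean, and Mosaic_Testbench discovers such methods by reflection. The names Example_RUT, Test_Square_Suite, test_square_of_small_ints, and Run_Example are hypothetical, invented for illustration; each class would live in its own file, and the suite class must be public so the Testbench can invoke its methods.

  import com.ReasoningTechnology.Mosaic.Mosaic_IO;
  import com.ReasoningTechnology.Mosaic.Mosaic_Testbench;

  // The RUT lives outside the suite class so that reflection does not
  // mistake it for a test routine.
  class Example_RUT{
    static int square(int x){ return x * x; }
  }

  // A suite holds only test routines: Boolean methods taking a Mosaic_IO.
  public class Test_Square_Suite{
    public Boolean test_square_of_small_ints(Mosaic_IO io){
      Boolean pass = true;
      pass = pass && Example_RUT.square(0) == 0;   // test case
      pass = pass && Example_RUT.square(3) == 9;   // test case
      pass = pass && Example_RUT.square(-4) == 16; // test case
      return pass;
    }
  }

  class Run_Example{
    public static void main(String[] args){
      // run() sequences through the suite and prints a summary report
      System.exit( Mosaic_Testbench.run( new Test_Square_Suite() ) );
    }
  }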


At the time of this writing, Mosaic does not provide features for breaking up large test runs into parallel pieces and then load balancing those pieces. Perhaps such a feature will be developed for a future version. However, this does not prevent an enterprising tester from running multiple Mosaic runs with different test routines in parallel in an ad hoc manner, or with other tools.


Function versus Routine


A routine is an encapsulated sequence of instructions, with a symbol table for local variables, and an interface for importing and exporting data through the encapsulation boundary. This interface maps arguments from a caller to parameters within the routine, enabling data transfer at runtime. In the context of testing, the arguments that bring data into the routine are referred to as inputs, while those that carry data out are called outputs. Notably, in programming, outputs are often called return values.


In computer science, a pure function is a routine in which outputs depend solely on the provided inputs, without reference to any internal state or memory that would persist across calls. A pure function produces the same output given the same inputs every time it is called. Side effects, such as changes to external states or reliance on external resources, are not present in pure functions; any necessary interactions with external data must be represented explicitly as inputs or outputs. By definition, a function produces a single output, though this output can be a collection, such as a vector or set.
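
A minimal sketch contrasting the two, with hypothetical names:

  class Pure{
    // pure: the output depends only on the inputs; no state, no side effects
    static int add(int a ,int b){ return a + b; }
  }

  class Counter{
    // not pure: the output depends on the sequence of prior calls
    private int count = 0;
    int next(){ return ++count; }
  }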


Routines with internal state variables that facilitate temporal behavior can produce outputs that depend on the sequence and values of prior inputs. This characteristic makes such routines challenging to test. Generally, better testing results are achieved when testing pure functions, where outputs depend only on current inputs.


Block and Integration


A test routine provides inputs to a RUT and collects its outputs, often doing so repeatedly in a sequence of test cases. The test routine then evaluates these values to determine if the test has passed or failed.


When a test routine evaluates a RUT that corresponds to a single function or module within the program, it performs a block test.


When a test routine evaluates a RUT that encompasses multiple program components working together, it is conducting an integration test.


Integration tests typically involve combining substantial components of a program that were developed independently. Such tests tend to occur late in the project timeline, where they can reveal complex and unforeseen interactions between components at a point when there is not adequate time to deal with them. To help address these challenges, some software development methodologies recommend introducing simplified versions of large components early in the development process, and then refining them over time.


Failures and Faults


A test routine has two primary responsibilities: first, supplying inputs to and collecting outputs from the RUT; and second, determining whether the RUT passed or failed the test. This second responsibility is handled by the failure decider. Even when the failure decider is not an explicit function in the test routine, its functionality will still be present in the test routine's logic.


A given failure decider might produce false positive or false negative results. A false positive occurs when the failure decider indicates that a test has passed when it should have failed; hence, this is also known as a false pass. Conversely, a false negative occurs when the decider indicates failure when the test should have passed; hence, this is also known as a false fail. An ideal failure decider would produce neither false passes nor false fails.
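
As a sketch of how a decider leans one way or the other, consider a failure decider for a numeric RUT that accepts the observed output when it lies within a tolerance of the reference value. The tolerance epsilon here is a hypothetical knob: set it too wide and the decider produces false passes; set it narrower than the RUT's legitimate rounding error and it produces false fails.

  class Failure_Decider{
    // pass when the observed output is within epsilon of the reference
    static boolean decide_pass(double observed ,double reference ,double epsilon){
      return Math.abs(observed - reference) <= epsilon;
    }
  }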


In a typical testing workflow, passing tests receive no further scrutiny. In contrast, failed tests are further examined to locate the underlying fault. Thus, for such a workflow, false fails are likely to be caught in the debugger, while false passes might go undetected until release, then be discovered by users. Early in the project timeline, this effect can be mitigated by giving passing cases more scrutiny, essentially spot-checking the test environment. Later, in regression testing, the volume of passing cases causes spot-checking to be ineffective. Alternative strategies include redundant testing, better design of the failure decider, or employing other verification techniques.


A failure occurs when there is a deviation between the observed output from a RUT and the ideal output. When the ideal output is unavailable, a reference output is often used in its place. When using reference outputs, the accuracy of test results depends on both the accuracy of the failure decider and the accuracy of the reference outputs themselves.


Some testers will refer to an observed output as an actual output. Additionally, some testers will call reference outputs golden values, particularly when those values are considered highly accurate. However, the terminology introduced earlier aligns more closely with that used in scientific experiments, which is fitting since testing is a form of experimentation.


A fault is a flaw in the design, implementation, or realization of a product that, if fixed, would eliminate the potential for a failure to be observed. Faults are often localized to a specific point, but they can also result from the mishandling of a confluence of events that arise during product operation.


The goal of testing is to create conditions that make failures observable. Once a failure is observed, it is the responsibility of developers, or testers in a development role, to debug these failures, locate the faults, and implement fixes.


Root cause analysis extends beyond the scope of development and test. It involves examining project workflows to understand why a fault exists in the product. Typically, root cause analysis will identify a root cause that, if "fixed," would not eliminate the potential for a failure to be observed in the current or near-term releases. Consequently, root cause analysis is generally not a priority for design and testing but instead falls within the domain of project management.


A technique commonly used to increase the variety of conditions—and thus the likelihood of creating conditions that reveal faults—is to run more tests with different inputs. This is called increasing the test coverage.


The Mosaic tool assists testers in finding failures, but it does not directly help with identifying the underlying fault that led to the failure. Mosaic is a tool for testers. However, these two tasks—finding failures and locating faults—are not entirely separate. Knowing where a failure occurs can provide the developer with a good starting point for locating the fault and help narrow down possible causes. Additionally, once a developer claims to have fixed a fault, that claim can be verified through further testing.


Testing Objectives

  • Verification Testing
    Purpose: To confirm that the software or system meets the specified requirements and design. Verification testing ensures that each component behaves as expected according to specifications, often conducted throughout development to catch any deviations from the original plan.

  • Regression Testing
    Purpose: To ensure that recent changes or additions to the codebase have not introduced new errors. This type of testing checks that previously tested functionalities still work as intended, making it essential for maintaining stability as updates are made.

  • Development Testing
    Purpose: To evaluate code correctness and functionality during the development process. Development testing is often exploratory, allowing developers to check whether their code performs as expected before formal testing. It can include unit testing, integration testing, and other quick checks to validate functionality on the fly.

  • Exploratory Testing
    Purpose: To uncover unexpected issues by testing the software in an unscripted manner. Exploratory testing allows testers to investigate the software's behavior outside of planned test cases, often discovering edge cases or flaws that structured tests may miss.

  • Performance Testing
    Purpose: To assess how the software performs under expected and extreme conditions. Performance testing evaluates response times, resource usage, and stability, often covering areas like load, stress, and scalability testing. This objective ensures the system can handle the demands it will face in production.

  • Compliance Testing
    Purpose: To confirm that the software adheres to regulatory, legal, and industry standards. Compliance testing ensures that the system meets external requirements, which may include accessibility, data privacy, and industry-specific standards.

  • Security Testing
    Purpose: To identify vulnerabilities and ensure the software is protected against unauthorized access and threats. Security testing checks for risks like data breaches, weak authentication, and exposure to known vulnerabilities, helping to safeguard sensitive information and user privacy.

  • Compatibility Testing
    Purpose: To verify that the software works across different environments, devices, and platforms. Compatibility testing ensures consistent functionality and appearance across browsers, operating systems, hardware configurations, and other setups.

  • Acceptance Testing
    Purpose: To determine if the software meets the end user's needs and expectations. Acceptance testing, often conducted by stakeholders or QA teams, validates that the software is usable and functional from a real-world perspective, acting as the final check before release.

  • Documentation Testing
    Purpose: To ensure that all documentation, guides, and user manuals are accurate and reflect the current software functionality. Documentation testing verifies that users have clear, up-to-date information for effective usage and troubleshooting.

  • Usability Testing
    Purpose: To confirm that the software is user-friendly and intuitive. Usability testing focuses on the ease of use, ensuring that end users can navigate and interact with the software without unnecessary friction, leading to a positive user experience.

The Mosaic Testbench is useful for any type of testing that can be formulated as test routines testing RUTs. This certainly includes verification, regression, development, and exploratory testing. It will include the portions of performance, compliance, security, compatibility, and acceptance testing that fit the model of test routines and RUTs. Only recently has it become imaginable that the Mosaic Testbench could be used for documentation testing. However, it is now possible to fit an AI API into a test routine, and turn a document into a RUT. Usability testing often depends on other types of tests, so to this extent the Mosaic Testbench can play a role. However, usability is often also in part feedback from users. So short of putting users in the Matrix, this portion of usability testing remains outside the domain of the Mosaic Testbench, though come to think of it, the Mosaic Testbench could be used to reduce surveys to pass/fails.


Each test objective will lead to writing tests of a different nature.


Unstructured Testing


This section outlines some common approaches to unstructured testing, often referred to as black box testing. Black boxes are inherent in even the most structured testing approaches, as at the lowest levels of analysis, elements will always remain opaque. Even in the most highly detailed test of logic possible, one that examines a RUT down to the individual logic gates, each gate would be treated as a black box.


Reference Output Based Testing

+ +

In reference output based testing, an ordering + is assigned to the inputs for + the routine under test, as well as to + its outputs. Through this ordering the inputs + and outputs become vectors. Thus the routine under test is given + an input vector and it returns + an observed output vector.

+ +

A Reference Model is then given the same input vector, and it produces a reference output vector. The reference output vector has the same component ordering as the observed output vector.

+ +

The failure detection function then compares + each observed output vector with its corresponding reference output vector. If + they do not match, the test is deemed to have failed.

+ +
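
As a concrete sketch, here is what this looks like in C++. The routine under test, rut_sum, and the reference model, ref_sum, are hypothetical stand-ins; a real test routine placed in the Testbench would take the same shape.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical routine under test (RUT).
    int8_t rut_sum(int8_t a, int8_t b){ return (int8_t)(a + b); }

    // Hypothetical reference model, ideally implemented independently of the RUT.
    int8_t ref_sum(int8_t a, int8_t b){ return (int8_t)((int)a + (int)b); }

    // Reference output based test routine: the failure decider is a
    // component-by-component comparison of observed and reference outputs.
    bool test_sum(){
      bool pass = true;
      const int cases[][2] = { {0, 0}, {1, -1}, {127, 1}, {-128, -1} };
      for( const auto &c : cases ){
        int8_t observed  = rut_sum((int8_t)c[0], (int8_t)c[1]);
        int8_t reference = ref_sum((int8_t)c[0], (int8_t)c[1]);
        if( observed != reference ){
          printf("fail: sum(%d, %d) observed %d, reference %d\n",
                 c[0], c[1], observed, reference);
          pass = false;
        }
      }
      return pass;
    }

+ +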

It follows that in reference output based testing, the accuracy of the + test results depends solely on the accuracy of the Reference Model.

+ +

When the implementation of the Reference Model is unrelated to that of the routine under test, we tend to expect that the errors produced by the Reference Model will be uncorrelated with those produced by the routine under test, and thus unlikely to coincide. This property biases test routines towards delivering false fails rather than false passes. As noted earlier, false fails are likely to be caught, as test fails are followed up with further scrutiny. It follows that reference output based testing can potentially deliver a high degree of accuracy even though the reference model is not ideal.

+ +

Property Check Testing

+ +

Property Check Testing is an alternative to + reference output based testing. Here, rather than comparing each observed + output to a reference output, the observed output is validated against + known properties or expected characteristics.

+ +

For example, given an integer as input, a function that correctly squares + this input will preserve the parity of the input, as an odd number squared + will be odd, and an even number squared will be even. The failure decider + can check this property for each test case, and if it does not hold, the + test case fails.

+ +
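
As a sketch, assuming a hypothetical square routine rut_square, the parity property check might look like this in C++:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical routine under test.
    int64_t rut_square(int64_t x){ return x * x; }

    // Property check: squaring preserves parity, so the output's low bit
    // must equal the input's low bit. No reference output is needed.
    bool test_square_parity(){
      bool pass = true;
      for( int64_t x = -1000; x <= 1000; x++ ){
        int64_t observed = rut_square(x);
        if( (observed & 1) != (x & 1) ){
          printf("fail: square(%lld) = %lld breaks parity\n",
                 (long long)x, (long long)observed);
          pass = false;
        }
      }
      return pass;
    }

+ +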

Note that for the square RUT test, this proposed property check is weak. Given a uniform distribution, half the time an errant square will still have the correct parity. There are stronger property checks that could be done for squares, but the point here is one of illustration. A weak property check will not recognize many failures, and will thus be biased towards false pass decisions. Those are the bad ones, as passing tests typically receive no further scrutiny.

+ +

Spot Checking

+ +

In spot checking, the function under test is checked against one or two + input vectors. When using a black box approach, these are chosen at + random.

+ +

Moving from zero to one is an infinite relative change, i.e., running a program for the first time requires that many moving parts work together, parts that have never been tried before; hence, a tremendous amount is learned about the logic and setup when the first test runs. Such a first test is called a smoke test, a term that has literal meaning in the field of electronics testing.

+ +

Exhaustive Testing

+ +

A test routine will potentially run multiple test cases against a given + RUT. If the RUT is a pure function, then per test case, a single test + vector will be given to the RUT, and a single output vector will be + returned. However, if the RUT is sequential in nature, for each test case + there will be a sequence of input vectors, and potentially a sequence of + output vectors.

+ +

The set of possible inputs for a RUT, whose members are either individual vectors or vector sequences, constitutes the input space. Test coverage is typically given as the proportion of inputs tested to the total in the input space, reported as a percentage.

+ +

When the RUT is a pure function, the input space is an enumeration of all possible input vectors. If the inputs include arbitrarily long strings, then it will not be possible to complete such an enumeration; the best that can be done is to generate more and more inputs upon demand.

+ +

When the RUT has sequential behavior, achieving full coverage requires giving the RUT every possible starting input, and then sequencing it to a point of hitting a stop state or cycle state in every possible way. Again, if inputs can be arbitrarily long strings, such an enumeration cannot be completed. Furthermore, if the RUT state is encapsulated unseen in a black box, it might be very difficult, or impossible, to detect when the state has cycled.

+ +

Exhaustive testing is said to have been + done when every single input in the input space has been tested. + An exhaustive test will have obtained 100% coverage, with no rounding + done in the coverage computation.

+ +

Suppose that a fault appears at time t₀. Suppose there is a duration of + time of interest, Δ, that begins at or later than t₀. Suppose further + there exists a given test and test case that fails due to the fault, but + would not otherwise fail. Then a failure is + reproducible during Δ, if and only if the given test and test case + would fail if run at any time during Δ, and no matter how many times it is + run.

+ +

For a RUT that is a pure function, this definition is the same as saying the test case fails at the same input value every time during Δ, when ideally it should have passed. For a sequential RUT, it is saying that the same input vector sequence will always lead to a failure, when ideally it would lead to a pass.

+ +

Although the same test routine is run with identical inputs, a failure + might not be reproducible due to other sources of variability, as + examples:

+
    +
  1. The contract made with the programmer for using the exact same
     inputs for the exact same test routine was broken.
  2. Use of uninitialized memory.
  3. Software updates or platform changes between test runs during Δ.
  4. Green thread, or real thread, scheduling differences, whether done
     by the OS or by the interpreter.
  5. Using the system time, or another system parameter, as data.
  6. Race conditions.
  7. Getting values from a randomly seeded pseudo random number generator.
  8. Reaching outside the architecture model for values, for example by
     using performance measures or by timing events.
  9. A hardware fault that is sensitive to a myriad of possible
     environmental influences.
+ +

Exhaustive testing will find all failures that are reproducible. It might + find failures that are not reproducible. The probability of witnessing + non-reproducible failures will typically go up when using the technique + of over testing, i.e. running even more than an + exhaustive number of tests.

+ +

Structured Testing

+ +

Structured testing is a form of white box testing, where the tester + examines the code being tested and applies various techniques to it + to increase the efficiency of the testing.

+ +

The Need for Structured Testing

+ +

All types of black-box testing have a serious problem in that the search space for failures grows exponentially as the number of input bits grows. You have probably heard about this sort of thing before, but you might not appreciate just how severe the situation is. To illustrate, we will consider the simplest of programs, one that adds two numbers. When the RUT is a black box, the test routine only has access to the interface, so it appears like this:

+ +

+        int8 sum(int8 a, int8 b){
+        ...
+        }
+    
+ +

Here, two int8 values are being added, so an input test vector will have + 16 bits. The result is also an int8, so an output vector will have 8 bits.

+ +

As the internals of the RUT are unknown, it could contain unexpected logic, like this:

+ +

+        int8 sum(int8 a, int8 b){
+        if(a == 248 && b == 224) return 5;
+        else return a + b;
+        }
+    
+ +

A developer might not be writing malicious code when something like this appears; instead, the code might have been pulled in from somewhere else and dropped in, and there could have been a special case for this situation on another machine. Perhaps the code was generated by an AI, or it could be leftover debug code. This example also illustrates that testers are typically not responsible for understanding developer code: though in this case the logic is obvious, there can be more obscure functions that testers cannot take the time to understand, and which might exhibit similar unexpected behavior.

+ +

As this is a black box, the numbers 248 and 224 are not known to the test writer. + Therefore, the only effective unstructured testing approach that is guaranteed to + find this failure is exhaustive testing.

+ +

Exhaustive testing is feasible here. An input test vector with 16 bits leads to an input space of 65,536 points. Sixty-five thousand tests is trivial for a modern desktop. The full test will take about half a millisecond, and in this time the test routine is guaranteed to find all failures. Note that after a quarter of a millisecond, half of the input space will have been covered, so there is a 0.5 probability of having found a single failure by that time. Generally, half the total time corresponds to a 0.5 probability of finding a single failure.

+ +
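
Here is a sketch of such an exhaustive test in C++. The RUT shown is a stand-in carrying the special case from above, and wrap-around addition serves as the reference model; the loop enumerates all 65,536 points of the input space.

    #include <cstdint>
    #include <cstdio>

    // Stand-in RUT harboring the special case from the text.
    int8_t rut_sum(int8_t a, int8_t b){
      if( a == (int8_t)248 && b == (int8_t)224 ) return 5;
      return (int8_t)(a + b);
    }

    // Exhaustive test: walk the entire 16-bit input space.
    bool test_sum_exhaustive(){
      bool pass = true;
      for( int a = -128; a <= 127; a++ ){
        for( int b = -128; b <= 127; b++ ){
          int8_t observed  = rut_sum((int8_t)a, (int8_t)b);
          int8_t reference = (int8_t)(a + b);   // wrap-around reference model
          if( observed != reference ){
            printf("fail: sum(%d, %d) observed %d, reference %d\n",
                   a, b, observed, reference);
            pass = false;
          }
        }
      }
      return pass;
    }

+ +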

Now, suppose that instead of looking for a reproducible fault, we have:

+

+      int8 sum(int8 a, int8 b){
+        if(a == 255 * rand() && b == 224 * rand()) return 5;
+        else return a + b;
+      }
+    
+ +

In this case, to find the fault, the test routine must guess the values of two independent + 8-bit random variables from a uniform distribution. As they are independent, we can combine + them and note that the test must guess a 16-bit value. If we consider an "exhaustive" test, + the tester will make 2^16 tries. Hence, the probability of finding this failure is:

+ +

+        1 - (1 - 2^-16)^(2^16) = 0.6321...
+    
+ +

A small adjustment to the above equation is necessary to make it precise, because sometimes 5 is the correct answer. Thus, with 2^16 test cases, there will be certainty (a probability of 1.0) of finding all reproducible errors and about a 0.63 probability of finding a single random fault. The 0.63 is no accident: (1 - 1/n)^n approaches 1/e for large n, so the probability approaches 1 - 1/e ≈ 0.632. The two probabilities are not as far apart as one might expect, given that the failure is "jumping around."

+ +

Now, let's go back to the reproducible error case, but this time, suppose we are working + with an int16:

+ +

+      int16 sum(int16 a, int16 b){
+        ...
+      }
+    
+ +

Now an input vector has 32 bits, giving an input space with 4,294,967,296 points. Our computer will require about 33 seconds of compute time for this. Adding around 10 seconds of wall-clock overhead, let's call it 40 seconds. Testing would be barely practical if it took 40 seconds to test such a simple RUT as this, but perhaps we would invest in a faster computer?

+ +

+      int32 sum(int32 a, int32 b){
+        ...
+      }
+    
+ +

Now, suppose we are adding 32-bit numbers. The input space now has 18,446,744,073,709,551,616 points. Compute time, without overhead, will be about 4,496 years! Suffice it to say, we have discovered that testing the addition of two 32-bit numbers exhaustively is impractical. Even if we break the problem into 1,000 pieces on different processors and use a state-of-the-art server farm, it would still take years and cost a significant amount. What will you tell the boss?

+ +

But wait! What if we move to 64-bit computing?

+ +

+        int64 sum(int64 a, int64 b){
+        ...
+        }
+    
+ +

The input space now has:

+

+        340,282,366,920,938,463,463,374,607,431,768,211,456
+    
+

points. That's about 340 undecillion. Compute time is 83 sextillion years—or about + 6 trillion times the age of the universe. Even with all the processing power on Earth, + even if you're willing to accept a probability of 0.1 of finding the failure, it would + take a thousand times longer than the age of the universe to test a function as simple + as adding two numbers. Clearly, there must be a better approach.

+ + +

Summary Table

    Operand Width    Input Space     Compute Time
    8 bits           6.55 x 10^4     0.5 ms
    16 bits          4.29 x 10^9     33 s
    32 bits          1.84 x 10^19    4,496 years
    64 bits          3.40 x 10^38    6 x 10^12 times the age of the universe
+ +

A typical response from people when they see this table is that they knew it went up fast, but did not know it went up this fast. It is also important to note that there is a one-to-one relationship between the percentage of the exhaustive test time spent and the percentage of coverage achieved: half the time, 50 percent coverage. In the last row of the table, to have a reasonable test time, coverage would be on the order of 10^-18 percent. At that level of coverage there is really no reason to test. Hence, this table is not limited to speaking about exhaustive testing; rather, it speaks to black box testing in general.

+ +
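
The rows of the table can be reproduced with a few lines of arithmetic, as in the sketch below. The per-test cost is an assumption, chosen so that the 16-bit row lands near 33 seconds.

    #include <cstdio>
    #include <cmath>

    int main(){
      const double ns_per_test = 7.7;        // assumed per-test cost
      const int operand_bits[] = {8, 16, 32, 64};
      for( int w : operand_bits ){
        double points  = pow(2.0, 2.0 * w);  // two operands of w bits each
        double seconds = points * ns_per_test * 1e-9;
        printf("%2d bits: %.2e points, %.2e seconds (%.2e years)\n",
               w, points, seconds, seconds / 3.15e7);
      }
      return 0;
    }

+ +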

Informed Spot Checking

+ +

In white box testing, we take the opposite approach to black box testing. The test writer does look at the code implementation, and must understand how to read the code. Take our 64-bit adder example from the prior section; in this section we will apply to it a white box technique known as Informed Spot Checking.

+ +

This is the prior example as a black box:

+ +

+      int64 sum(int64 a, int64 b){
+        ...
+      }
+    
+ +

And here it is as a white box:

+ +

+      int64 sum(int64 a, int64 b){
+        if(a == 5717710 && b == 27) return 5;
+        else return a + b;
+      }
+    
+ +

When following the approach of Informed Spot Checking, the tester examines + the code and sees there is a special case for a = 5717710 + and b = 27, which becomes the first test case. There’s also + a special case for when the sum exceeds the 64-bit integer range, both in + the positive and negative directions; these become two more test + cases. Finally, the tester includes a few additional cases that are not + edge cases.

+ +
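
A sketch of the resulting test routine in C++ follows. The RUT is reproduced so the sketch is self-contained, and wrap-around behavior at the overflow edges is an assumption about the specification.

    #include <cstdint>
    #include <cstdio>

    // The white box RUT from above, reproduced here. Wrap-around addition
    // is used to sidestep signed overflow issues.
    int64_t rut_sum(int64_t a, int64_t b){
      if( a == 5717710 && b == 27 ) return 5;
      return (int64_t)((uint64_t)a + (uint64_t)b);
    }

    // Informed spot check: the special case read from the code, the two
    // overflow edges, and a couple of ordinary values.
    bool test_sum_spot(){
      const struct { int64_t a, b, expect; } cases[] = {
        { 5717710, 27, 5717737 },      // the special case seen in the code
        { INT64_MAX, 1, INT64_MIN },   // positive overflow edge (wrap assumed)
        { INT64_MIN, -1, INT64_MAX },  // negative overflow edge (wrap assumed)
        { 0, 0, 0 },                   // ordinary values
        { 1234, 4321, 5555 }
      };
      bool pass = true;
      for( const auto &c : cases ){
        int64_t observed = rut_sum(c.a, c.b);
        if( observed != c.expect ){
          printf("fail: sum(%lld, %lld) observed %lld, expected %lld\n",
                 (long long)c.a, (long long)c.b,
                 (long long)observed, (long long)c.expect);
          pass = false;
        }
      }
      return pass;
    }

+ +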

Thus, by using white box testing instead of black box testing, the tester finds all + the failures with just 4 or so test cases instead of

+

+      340,282,366,920,938,463,463,374,607,431,768,211,456 
+     
+

cases. Quite a savings, eh?

+ +

There are notorious edge cases in software, and these can often be seen by looking at the RUT. Zeros, and inputs that lead to index values just off the end of arrays, are common ones. Checking a middle value along with the edge cases is often an effective approach for finding failures.

+ +

There is an underlying mechanism at play here. Note that it takes two points to determine a line. In Fourier analysis, it takes just over two samples per period of the highest frequency component to determine an entire waveform. Code also has patterns, patterns that are disjoint at edge cases. Hence if a piece of code runs without failures for both the edge cases and spot check values in between, it will often run without failures over the entire domain of values. This effect explains why ad hoc testing has led to so much relatively fail-free code.

+ +

Informed Spot Checking is especially valuable in early development, as it + provides useful insights with minimal investment. In the early development + stage, making more investment in test code is unwise due to the code being + in flux. Test work is likely to get ripped up and replaced.

+ +

The idea of test work being ripped up and replaced highlights a drawback + of white box testing. Analysis of code can become stale when implementations + are changed. However, due to the explosion in the size of the input space + with even a modest number of inputs, white box testing is necessary if there + is to be much commitment to producing reliable software or hardware.

+ +

Refactoring the RUT

+ +

Refactoring a RUT to make it more testable can be a powerful method for + turning testing problems that are exponentially hard due to state + variables, or very difficult to debug due to random variables, into + problems that are linearly hard. According to this method, the + tester is encouraged to examine the RUT to make the testing problem + easier.

+ +

By refactoring the RUT we mean rewriting the code to bring any random variables or state variables to the interface, where they are then treated as inputs and outputs.

+ +

If placing state variables on the interface is adopted as a discipline by the developers, refactoring will not be needed in the test phase; or if it is needed, white box testers will see this, and it will be a bug that has been caught. Otherwise refactoring leads to two versions of a routine, one that has been refactored and one that has not. The leverage gained on the testing problem by refactoring a routine typically more than outweighs the extra verification problem of comparing the before and after routines.

+ +

As an example, consider our adder function with a random fault. As we + know from prior analysis, changing the fault to a random number makes + testing harder, but perhaps more importantly, it makes it nearly impossible + to debug, as the tester can not hand it to the developer and say, + 'it fails in this case'.

+

+      int64 sum(int64 a, int64 b){
+        if( a == (5717710 * rand()) && b == (27 * rand()) ) return 5;
+        else return a + b;
+      }
+    
+ +

The tester refactors this function as:

+

+      int64 sum( int64 a, int64 b, int64 a0 = 5717710*rand(), int64 b0 = 27*rand() ){
+        if( a == a0 && b == b0 ) return 5;
+        else return a + b;
+      }
+    
+ +

Here a0 and b0 are added to the interface as optional arguments. During testing their values will be supplied; during production the defaults will be used. Thus, we have broken the one test problem into two: the question of whether sum works, and the question of whether the random number generation works.

+ +

Failures in sum found during testing are now reproducible. If the tester employs informed spot checking, the failure will be found with few tests, and the point in the input space where the failure occurs can be reported to development and used for debugging.

+ +
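
With the randomness now injectable, a test case pins a0 and b0 to known values, making any failure reproducible. Below is a sketch, with the refactored routine rendered in compilable C++ for illustration.

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // The refactored RUT: the formerly hidden random values are now
    // optional parameters, defaulted for production use.
    int64_t sum(int64_t a, int64_t b,
                int64_t a0 = 5717710 * (int64_t)rand(),
                int64_t b0 = 27 * (int64_t)rand()){
      if( a == a0 && b == b0 ) return 5;
      return a + b;
    }

    bool test_sum_refactored(){
      // Pin the formerly random values; the fault is now a fixed point
      // in the input space, and every failure is reproducible.
      int64_t observed = sum(10, 20, /*a0*/ 10, /*b0*/ 20);
      if( observed != 30 ){
        printf("fail: sum(10, 20) with a0=10, b0=20 observed %lld\n",
               (long long)observed);
        return false;  // report (10, 20, 10, 20) to development
      }
      return true;
    }

+ +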

Here is a function that keeps a state variable between calls.

+

+    int state = 0;
+    int call_count = 0; 
+    void state_machine(int input) {
+        int choice = (input >> call_count) & 1; 
+        switch (state) {
+            case 0:
+                printf("State 0: Initializing...\n");
+                state = choice ? 0 : 1;
+                break;
+            case 1:
+                printf("State 1: Processing Path A...\n");
+                state = choice ? 0 : 2; 
+                break;
+            case 2:
+                printf("State 2: Processing Path B...\n");
+                state = choice ? 0 : 3;
+                break;
+        }
+        call_count++;
+    }
+    
+ +

The Mosaic Testbench makes standard out available to the test routine in an array, so we can capture and examine the printed values while testing this RUT. Because of the state variables, state and call_count, this routine will behave differently each time it is called. A black box test will have a large number of input vector sequences to try. The failure occurs in the call after being in state 2, when the count is such that the choice is to go to state 3.

+ +

+    int state = 0;
+    int call_count = 0; 
+    void state_machine(int input ,int state0 = state ,int call_count0 = call_count) {
+        int choice = (input >> call_count0) & 1; 
+        switch (state0) {
+            case 0:
+                printf("State 0: Initializing...\n");
+                state = choice ? 0 : 1;
+                break;
+            case 1:
+                printf("State 1: Processing Path A...\n");
+                state = choice ? 0 : 2; 
+                break;
+            case 2:
+                printf("State 2: Processing Path B...\n");
+                state = choice ? 0 : 3;
+                break;
+        }
+        call_count = call_count0 + 1;
+    }
+    
+ +

Here the test routine supplies state0 and call_count0 as inputs. The test routine treats state and call_count as outputs, so this is then a pure function. As a pure function it poses a much easier testing problem. Now, instead of a combinatorially hard problem involving input sequences, the test routine can visit each of the three states, and set the input such that each visits its two next states. That is six test cases to see everything that this function is capable of doing.

+ +
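
Below is a sketch of those six test cases, with the refactored routine rendered in compilable C++ for illustration. The failure decider here is a property check: the only handled states are 0 through 2, so any transition outside that range is flagged.

    #include <cstdio>

    int state = 0;
    int call_count = 0;

    // The refactored RUT from above.
    void state_machine(int input, int state0, int call_count0){
      int choice = (input >> call_count0) & 1;
      switch( state0 ){
        case 0: printf("State 0: Initializing...\n");      state = choice ? 0 : 1; break;
        case 1: printf("State 1: Processing Path A...\n"); state = choice ? 0 : 2; break;
        case 2: printf("State 2: Processing Path B...\n"); state = choice ? 0 : 3; break;
      }
      call_count = call_count0 + 1;
    }

    bool test_state_machine(){
      bool pass = true;
      // Visit each state with each choice: six cases cover every transition.
      for( int s = 0; s <= 2; s++ ){
        for( int choice = 0; choice <= 1; choice++ ){
          state_machine(/*input*/ choice, /*state0*/ s, /*call_count0*/ 0);
          printf("from state %d, choice %d -> state %d\n", s, choice, state);
          if( state < 0 || state > 2 ){  // property: only states 0..2 are handled
            printf("fail: transition into unhandled state %d\n", state);
            pass = false;
          }
        }
      }
      return pass;
    }

+ +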

Any time the RUT is refactored in the testing phase, it raises the question of whether the refactored code maintains the required functionality. This becomes another verification problem, which might or might not be addressed through testing. One way to manage this issue is to take the refactored code back to the developers and have them adopt it into the project. It then becomes the original code.

+ +

Bottom Up Testing

+ +

When a function corresponds directly to CPU instructions, as is the case for the + operator, we typically trust that it will give the right answer. The same can be said for the call and return dynamic. Unless we are working on a new compiler, it is typically assumed that this works; application program test suites do not include tests of whether calls work.

+ +

The reason for this trust is that CPU instructions and function calls are already extensively tested, both directly by the manufacturers and through widespread use. Though this trust is not always warranted, as in the case of the Intel Pentium divider, which had failure cases.

+ +

We can decompose a testing problem into trusted and untrusted components. We call routines that are trusted building blocks; we then use the building blocks to build up larger routines, and then test those to create larger building blocks. At the end we will have built up a trustworthy program.

+ +

This approach parallels what developers do when they write programs. They + start with primitive programs that come with the language or from + libraries, and then they compose these to write custom functions.

+ +

The following is an expansion of our adder example, creating and testing an adder for 256 bit numbers. For purposes of presentation, we will refer to int256 as a type that corresponds to an array of 32 bytes, and uint1 as a 1 bit unsigned integer, i.e. 0 or 1.

+ +

+    {uint1, uint64} full_adder(uint64 a, uint64 b, uint1 c0) {
+        uint64 partial_sum = a + b;
+        uint64 sum = partial_sum + c0;
+        uint1 carry_out = (partial_sum < a) || (sum < partial_sum);
+        return {carry_out, sum};
+    }
+    
+ +

Here is a 256 bit adder made from 64 bit adders.

+ +

+    {uint1, int256} add_256(int256 a, int256 b) {
+        uint1 carry_in = 0;
+        uint1 carry_out;
+        uint64 sum_parts[4];  // each 64-bit segment of the sum
+
+        for i = 0 to 3 {
+            // Get the i-th 64-bit segments of a and b
+            uint64 a_part = (a >> (i * 64)) & 0xFFFFFFFFFFFFFFFF;
+            uint64 b_part = (b >> (i * 64)) & 0xFFFFFFFFFFFFFFFF;
+
+            // Perform the full addition on each 64-bit part
+            {carry_out, sum_parts[i]} = full_adder(a_part, b_part, carry_in);
+
+            // Update carry-in for the next 64-bit segment
+            carry_in = carry_out;
+        }
+
+        int256 sum = 0;
+        for i = 0 to 3 {
+            sum |= (sum_parts[i] << (i * 64));
+        }
+
+        return {carry_in, sum};
+    }
+    
+ +

According to the bottom up technique, we first test + the full_adder, which is not a difficult testing problem. It + employs well known trusted operations, and has a couple of interesting + special case conditions. Given the numeric nature of this code, these + special case conditions are probably better verified by proof than by + testing, but they can be tested.

+ +
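
Here is a sketch of such block tests in C++, with full_adder rendered in compilable form. The interesting special cases are the ones that exercise the carry out: a wrap in the first addition, a wrap in the second, both, and neither.

    #include <cstdint>
    #include <cstdio>

    // The full adder from above, rendered in compilable C++.
    struct AdderResult { uint64_t carry_out; uint64_t sum; };

    AdderResult full_adder(uint64_t a, uint64_t b, uint64_t c0){
      uint64_t partial_sum = a + b;   // wraps modulo 2^64
      uint64_t sum = partial_sum + c0;
      uint64_t carry_out = (partial_sum < a) || (sum < partial_sum);
      return {carry_out, sum};
    }

    bool test_full_adder(){
      const uint64_t MAX = ~0ull;
      const struct { uint64_t a, b, c0, sum, carry; } cases[] = {
        { 0,   0,   0, 0,   0 },   // nothing carries
        { 1,   2,   1, 4,   0 },   // ordinary values
        { MAX, 1,   0, 0,   1 },   // carry out of the first addition
        { MAX, 0,   1, 0,   1 },   // carry out of the second addition
        { MAX, MAX, 1, MAX, 1 }    // both additions wrap
      };
      bool pass = true;
      for( const auto &c : cases ){
        AdderResult r = full_adder(c.a, c.b, c.c0);
        if( r.sum != c.sum || r.carry_out != c.carry ){
          printf("fail: full_adder(%llu, %llu, %llu)\n",
                 (unsigned long long)c.a, (unsigned long long)c.b,
                 (unsigned long long)c.c0);
          pass = false;
        }
      }
      return pass;
    }

+ +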

Once the full_adder can be trusted, testing add_256 + reduces to checking that the various 64 bit parts are extracted and then + packed correctly, + and are not, say, offset by one, and that the carries are properly communicated + during the add.

+ +
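
For add_256, one especially informative test case sets every 64-bit segment of a to all ones and adds 1, forcing the carry to ripple through all four segments. Below is a sketch, substituting a hypothetical four-limb representation for the int256 pseudotype.

    #include <cstdint>
    #include <cstdio>

    // full_adder as in the previous sketch.
    struct AdderResult { uint64_t carry_out; uint64_t sum; };
    AdderResult full_adder(uint64_t a, uint64_t b, uint64_t c0){
      uint64_t partial_sum = a + b;
      uint64_t sum = partial_sum + c0;
      uint64_t carry_out = (partial_sum < a) || (sum < partial_sum);
      return {carry_out, sum};
    }

    // A hypothetical 256-bit value: four 64-bit limbs, least significant first.
    struct uint256 { uint64_t limb[4]; };

    // add_256 over the limb representation; returns the final carry out.
    uint64_t add_256(const uint256 &a, const uint256 &b, uint256 &sum){
      uint64_t carry = 0;
      for( int i = 0; i < 4; i++ ){
        AdderResult r = full_adder(a.limb[i], b.limb[i], carry);
        sum.limb[i] = r.sum;
        carry = r.carry_out;
      }
      return carry;
    }

    // (2^256 - 1) + 1 = 2^256: every limb of the sum must be zero and the
    // carry out must be set, so a failure anywhere in the chain is visible.
    bool test_add_256_carry_chain(){
      uint256 a = {{ ~0ull, ~0ull, ~0ull, ~0ull }};
      uint256 b = {{ 1, 0, 0, 0 }};
      uint256 sum;
      uint64_t carry = add_256(a, b, sum);
      bool pass = (carry == 1);
      for( int i = 0; i < 4; i++ ) pass = pass && (sum.limb[i] == 0);
      if( !pass ) printf("fail: carry did not ripple through all segments\n");
      return pass;
    }

+ +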

Note this test also trusts the fact that ripple carry addition is a valid + algorithm for assembling the pieces. Thus there is a new verification + problem, that for the algorithm. In this case, ripple carry addition is + already a trusted algorithm.

+ +

Testing of add_256 could be further simplified with refactoring, by moving the loop control variables and the carry_in and carry_out to the interface. As i is recycled, it would become two variables, say i and j. Once the loop control variables are on the interface, it is straightforward to test the packing. Once the carries are on the interface, it is straightforward to test the carries.

+ +

In general, all programs and circuits can be conceptualized as functional units, channels, and protocols. A test that shows these work as specified shifts the test problem from the RUT to the specification.

+ +

Adding to the code

+ +

It is a common practice to add property checks to the code for gathering + data about failures or other potential problems. These will then write to + log files, or even send messages back to the code maintainers. By doing + this the testers benefit from the actual use of the product as though it + were a test run. When failures are found, such code might then trigger + remedial or recovery actions.

+ +
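
A minimal sketch of such an embedded check in C++; the property, the log path, and the remedial action are all placeholders.

    #include <cstdio>

    // Hypothetical embedded property check: in this product, every record
    // must carry a nonempty id. Violations are logged rather than crashing.
    void check_record_id(const char *id){
      if( id == nullptr || id[0] == '\0' ){
        FILE *log = fopen("/var/log/myapp_property.log", "a");  // placeholder path
        if( log ){
          fprintf(log, "property violation: empty record id\n");
          fclose(log);
        }
        // possible remedial action: assign a provisional id, queue the
        // record for repair, or notify the maintainers.
      }
    }

+ +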

About Reference Outputs and Reference Properties

+ +

When testing during development, reference outputs often come from the + developers or testers themselves. They know what they expect from the + routines, but they do not know if the code will meet these expectations, + so they write tests. Typically, they try to imagine the hardest possible + cases. However, sometimes a young developer avoids testing challenging + cases to sidestep the risk of failures—this is, of course, a poor approach + that can lead to undetected issues.

+ +

Often, specification authors provide reference outputs or extensive test + suites that must be passed to achieve certification. Architects also + contribute by creating multi-level specifications—for the entire program, + for the largest components, and for communication protocols between + components. These specifications often serve as high-quality reference + outputs and property checks that can be applied to the model during testing. + The goal of developers and testers is to meet these specifications, making + failures directly relevant to the development process and program design.

+ +

Experts in a specific area sometimes provide test data, maintaining + a database of reference data as a resource for validating outputs. + For some types of code, experts also supply property checks, which + evaluate whether outputs satisfy essential properties rather than specific + values. Depending on the domain, these properties can be an important aspect + of the testing process.

+ +

Each time a bug is found, a test should be created to capture a failure + related to that bug. Ideally, such tests are written with minimal + implementation-specific details so they remain relevant even after code + changes. These tests are then added to a regression testing suite, ensuring + that future changes do not reintroduce the same issues.

+ +

For applications involving multi-precision arithmetic, such as the earlier adder example, reference data is often sourced from another established multi-precision library, whether an open-source or commercial product. The assumption is that an existing product will be more reliable than a newly developed one, and since it is implemented differently, its errors are likely to be uncorrelated. This is competitive testing, an aspect of compatibility testing, here being used for other objectives. In the limit, as the RUT matures, this approach will tend to identify bugs in the other product's reference data as often as it does in the RUT, which might be an interesting effect.

+ +

In some cases, reference data comes from historical sources or existing + systems. When upgrading or replacing a legacy system, historical data + serves as a benchmark for comparison. Similarly, industry standards + and compliance datasets, particularly from regulatory organizations + like IEEE, NIST, or ISO, provide reliable reference points for applications + requiring standardized outputs. Compliance-driven tests are often required + for certification or regulatory approval in fields such as finance, + healthcare, and aerospace.

+ +

For cases requiring many inputs without needing specific reference values, random number generators can provide extensive test data. Examples include comparative testing and property checking. Random number generators can also be configured to concentrate cases in specific areas of the input domain that for some reason concern the testers.

+ +
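
Below is a sketch of generating such test data in C++. The region of concern and the mix ratio are assumptions made for illustration; the fixed seed keeps any failing cases reproducible.

    #include <cstddef>
    #include <cstdint>
    #include <random>
    #include <vector>

    // Seeded random test inputs, concentrated near a region of concern
    // (here, assumed to be the top of the int64 range).
    std::vector<int64_t> make_inputs(std::size_t n, std::uint64_t seed){
      std::mt19937_64 gen(seed);   // fixed seed: failing cases are reproducible
      std::uniform_int_distribution<int64_t> anywhere(INT64_MIN, INT64_MAX);
      std::uniform_int_distribution<int64_t> near_edge(INT64_MAX - 1000, INT64_MAX);
      std::vector<int64_t> inputs;
      inputs.reserve(n);
      for( std::size_t i = 0; i < n; i++ ){
        // assumption: one case in four lands near the edge of the range
        inputs.push_back( (i % 4 == 0) ? near_edge(gen) : anywhere(gen) );
      }
      return inputs;
    }

+ +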

Customer and user feedback sometimes uncovers additional test cases, + especially when dealing with complex or evolving software. Feedback + reveals edge cases or expected behaviors that developers and testers + may not have anticipated, allowing teams to create reference points + for new test cases that cover real-world use cases and address user needs.

+ +

Conclusion

+ +

If you are a typical tester or developer reading through the previous list, + you might feel a bit disappointed. Unless you work in a specialized area, + are attempting to create a compatible product, or need to exercise the hardware, much + of that list might seem inapplicable. For many developers, the most + applicable advice remains: "During development, reference outputs often + come from the developers or testers themselves." I apologize if this seems + limiting, but consider this: the reason we run programs is to generate the + very data we're looking for. If that data were easily available, we wouldn’t + need the program.

+ +

In many ways, testing is about making developers and testers the first + users of the product. All products will have bugs; it’s far better for + experts to encounter these issues first.

+ +

Testing also facilitates communication among project members. Are the + architects, developers, and testers all on the same page about how the + product should work? The only way to find out is to run what has been built + and observe it in action. For this, we need test cases.

+ +

This circular problem, finding data that our program should generate in order to test the program itself, illustrates a fundamental limitation in software testing. We encountered this in the discussion on unstructured, black-box testing: as soon as we open the box to inspect the code, we are no longer just testing it, but reasoning about it and even verifying it formally.

+ +

This, perhaps, hints at a way forward. Our program is a restatement of the + specification in another language. Verification, then, is an equivalence + check. We can run examples to demonstrate equivalence, but black-box testing + alone will have limited impact. Alternatively, we can examine our code and + try to prove that it matches the specification. Though challenging, this + approach is far more feasible than waiting ten times the age of the universe + to confirm our solution through black box testing.

+ +

Think of testing as a reasoning problem. Explain why the routine works and + how it contributes to meeting the specification. Work from the top down: if + the high-level components behave correctly, the program will meet the + specification. That’s the first step. Then explain why the breakdown of + those top-level components ensures correct behavior. Continue this process, + and then use tests to validate each link in this chain of reasoning. In this + way, you can generate meaningful reference values.

+ +
+ + + + diff --git a/document/LICENSE.txt b/document/LICENSE.txt new file mode 120000 index 0000000..4ab4373 --- /dev/null +++ b/document/LICENSE.txt @@ -0,0 +1 @@ +../LICENSE.txt \ No newline at end of file diff --git a/document/README.txt b/document/README.txt new file mode 120000 index 0000000..ecfa029 --- /dev/null +++ b/document/README.txt @@ -0,0 +1 @@ +../README.txt \ No newline at end of file diff --git a/document/example_proxy.java b/document/example_proxy.java new file mode 100644 index 0000000..7c7d0ad --- /dev/null +++ b/document/example_proxy.java @@ -0,0 +1,26 @@ +// 1. + +Mosaic_AllMethodsPublicProxy proxy = new Mosaic_AllMethodsPublicProxy(SomeClass.class); + +String methodName = "compute"; +Class returnType = int.class; +Object[] args = {42, 15}; + +Object result = proxy.invoke(someInstance, methodName, returnType, args); +System.out.println(result); + + +// 2. + +Method method = SomeClass.class.getDeclaredMethod("compute", int.class, int.class); +FunctionSignature sigFromReflection = new FunctionSignature(method); + +FunctionSignature sigFromInvocation = new FunctionSignature( + "com.example.SomeClass", + "compute", + int.class, + new Object[]{42, 15} +); + +System.out.println(sigFromReflection.equals(sigFromInvocation)); // Should be true + diff --git a/document/see_also b/document/see_also new file mode 120000 index 0000000..02eb81c --- /dev/null +++ b/document/see_also @@ -0,0 +1 @@ +../tool_shared/third_party/RT-project-share/document🖉/ \ No newline at end of file diff --git a/document/todo.txt b/document/todo.txt new file mode 100644 index 0000000..c71e6ea --- /dev/null +++ b/document/todo.txt @@ -0,0 +1,63 @@ + +1. More languages support, notably nodejs. + +2. This message: + + Running Test_File_0...Structural problem: unpack_file_path_0 does not accept a single IO argument. + Error: unpack_file_path_0 has an invalid structure. + + Perhaps only the second one, getting rid of 'Error:' + + "Bad type signature for method: unpack_file_path_0 does not accept a single IO argument." + +3. TestBench -> Testbench perhaps? + +4. fix emacs.el so that jdbx doesn't always start with Test_Util. (note the + prior start can be found with M-x p + + +5. should include a tool for aggregating test suite runs + FYI, I'm using + +6. need an example .bashrc for setting the prompt now that I removed references +to the 'resource' project and plan to deprecate it. + +7. should check stdin for unused input and report error also. + +8. move 'unlock_class` from Ariadne tests to Mosaic_Util. + +9. consider adding Maven for the third party tools dependencies. + +10. really should move the developer/release 'install' script to the bespoke🖉/env script + +2024-12-10T14:25:40Z + + Gives an error, but still passes? Is this right? + ... Running Test_Testbench + Expected output: Structural problem message for dummy_invalid_return_method. + Structural problem: dummy_invalid_return_method does not return Boolean. + Test_Testbench Total tests run: 3 + Test_Testbench Total tests passed: 3 + Test_Testbench Total tests failed: 0 + +2024-12-13T02:48:08Z + + Java has a love-hate-relationship with reflections ability to access + private methods; however, access is needed for white box testing. It + might be in the future for this feature to work that the Mosaic + project will have to expanded into the same module as that being + tested, rather than having its jar file accessed through the class + path. 
+ + +2024-12-16T10:47:06Z + + FunctionSignature used with AllMethodsPublic currently does not + include the return type. It needs to have that. + +2024-12-20T06:09:38Z + + For Mosaic_Dispatcher, might be best to ditch the map and do lookup + to get the handle upon each call to a method, as we already have to + do the lookup to get the information for constructing the signature + for lookup.
-
-

An Introduction to Structured Testing

-

© 2024 Thomas Walker Lynch - All Rights Reserved.

-
- - -

Introduction

- -

This guide provides a general overview of testing concepts. It is - not a reference manual for the Mosaic Testbench itself. At the - time of writing, no such reference document exists, so developers and - testers are advised to consult the source code directly for implementation - details. A small example can be found in the Test_MockClass - file within the tester directory. Other examples can be found in projects - that make use of Mosaic.

- -

A typical testing setup comprises three main components: - the Testbench, the test - routines, and a collection of units under - test (UUTs). Here, a UUT is any individual software or hardware - component intended for testing. Because this guide focuses on software, we - use the term RUT (routine under test) to denote - the unit under test in software contexts. Although we use software-centric - terminology, the principles outlined here apply equally to hardware - testing.

- -

Each test routine supplies inputs to a RUT, collects the resulting - outputs, and determines whether the test passes or fails based on those - values. A given test routine might repeat this procedure for any number - of test cases. The final result from the test - routine is then relayed to the Testbench. Testers and developers write - the test routines and place them into the Testbench.

- -

Mosaic is a Testbench. It serves as a structured environment for - organizing and executing test routines, and it provides a library of utility - routines for assisting the test writer. When run, the Testbench sequences - through the set of test routines, one by one, providing each test routine - with an interface to control and examine standard input and output. Each - test routine, depending on its design, might in turn sequence through - test cases. During execution, the test - bench records pass/fail results, lists the names of the test routines that failed, - and generates a summary report with pass/fail totals.

- -

At the time of this writing, Mosaic does not provide features for - breaking up large test runs into parallel pieces and then load balancing - those pieces. Perhaps such a feature will be developed for a future version. - However, this does not prevent an enterprising tester from running multiple - Mosaic runs with different test routines in parallel in an ad hoc manner, or - with other tools.

- -

Function versus Routine

- -

A routine is an encapsulated sequence of instructions, with a symbol - table for local variables, and an interface for importing and exporting - data through the encapsulation boundary. This interface - maps arguments from a caller - to parameters within the routine, enabling data - transfer at runtime. In the context of testing, the arguments that bring - data into the routine are referred to as - inputs, while those that carry data out are called - outputs. Notably, in programming, outputs are often called - return values.

- -

In computer science, a pure function is a routine - in which outputs depend solely on the provided inputs, without reference to - any internal state or memory that would persist across calls. A pure function - produces the same output given the same inputs every time it is called. - Side effects, such as changes to external states or reliance on external - resources, are not present in pure functions; any necessary interactions - with external data must be represented explicitly as inputs or outputs. - By definition, a function produces a single output, though this output can - be a collection, such as a vector or set.

- -

Routines with internal state variables that facilitate temporal behavior - can produce outputs that depend on the sequence and values of prior - inputs. This characteristic makes such routines challenging to - test. Generally, better testing results are achieved when testing pure - functions, where outputs depend only on current inputs.

- - -

Block and Integration

- -

A test routine provides inputs to a RUT and collects its outputs, often - doing so repeatedly in a sequence of test cases. The test routine then - evaluates these values to determine if the test has passed or failed.

- -

When a test routine evaluates a RUT that corresponds to a single function - or module within the program, it performs a block - test.

- -

When a test routine evaluates a RUT that encompasses multiple program - components working together, it is conducting - an integration test.

- -

Integration tests typically involve combining substantial components of a - program that were developed independently. Such tests can occur later in the - project timeline, where they can reveal complex and unforeseen interactions - between components when there is not adequate time to deal with them. To - help address these challenges, some software development methodologies - recommend to instead introducing simplified versions of large components - early in the development process, and to then refine them over time.

- -

Failures and Faults

- -

A test routine has two primary responsibilities: firstly in supplying inputs - and collecting outputs from the RUT, and secondly in determining whether the RUT - passed or failed the test. This second responsibility is handled by - the failure decider. When the failure decider is not - an explicit function in the test routine,its functionality will still be present - in the test routines logic.

- -

A given failure decider might produce false - positive or false negative results. A - false positive occurs when the failure decider indicates that a test has - passed when it should have failed; hence, this is also known as - a false pass. Conversely, a false negative occurs - when the decider indicates failure when the test should have passed; hence, this also - known as a false fail. An ideal - failure decider would produce neither false passes nor false - fails.

- -

In a typical testing workflow, passing tests receive no further - scrutiny. In contrast, failed tests are further examined to locate the - underlying fault. Thus, for such a workflow, false fails are likely to be - caught in the debugger, while false passes might go undetected until - release, then be discovered by users. Early in the project timeline, this - effect can be mitigated by giving passing cases more scrutiny, essentially - spot-checking the test environment. Later, in regression testing, the volume - of passing cases causes spot-checking to be ineffective. Alternative - strategies include redundant testing, better design of the failure decider, - or employing other verification techniques.

- -

A failure occurs when there is a deviation between the observed output from a RUT and the ideal output. When the ideal output is unavailable, a reference output is often used in its place. When using reference outputs, the accuracy of test results depends on both the accuracy of the failure decider and the accuracy of the reference outputs themselves.

- -

Some testers will refer to an observed output as an actual output. Additionally, some testers will call reference outputs golden values, particularly when those values are considered highly accurate. However, the terminology introduced earlier aligns more closely with that used in scientific experiments, which is fitting since testing is a form of experimentation.

- -

A fault is a flaw in the design, implementation, or realization of a - product that, if fixed, would eliminate the potential for a failure to be - observed. Faults are often localized to a specific point, but they can also - result from the mishandling of a confluence of events that arise during - product operation.

- -

The goal of testing is to create conditions that make failures observable. Once a failure is observed, it is the responsibility of developers, or testers in a development role, to debug these failures, locate the faults, and implement fixes.

- -

Root cause analysis extends beyond the scope of development and test. It - involves examining project workflows to understand why a fault exists in the - product. Typically, root cause analysis will identify a root cause that, if - "fixed," would not eliminate the potential for a failure to be observed in - the current or near-term releases. Consequently, root cause analysis is - generally not a priority for design and testing but instead falls within the - domain of project management.

- -

A technique commonly used to increase the variety of conditions—and thus the likelihood of creating conditions that reveal faults—is to run more tests with different inputs. This is called increasing the test coverage.

- -

The Mosaic tool assists testers in finding failures, but it does not directly help with identifying the underlying fault that led to the failure. Mosaic is a tool for testers. However, these two tasks—finding failures and locating faults—are not entirely separate. Knowing where a failure occurs can provide the developer with a good starting point for locating the fault and help narrow down possible causes. Additionally, once a developer claims to have fixed a fault, that claim can be verified through further testing.

- -

Testing Objectives

- -
    -
  • - Verification Testing
    - Purpose: To confirm that the software or system meets the specified requirements and design. Verification testing ensures that each component behaves as expected according to specifications, often conducted throughout development to catch any deviations from the original plan. -
  • - -
  • - Regression Testing
    - Purpose: To ensure that recent changes or additions to the codebase have not introduced new errors. This type of testing checks that previously tested functionalities still work as intended, making it essential for maintaining stability as updates are made. -
  • - -
  • - Development Testing
    - Purpose: To evaluate code correctness and functionality during the development process. Development testing is often exploratory, allowing developers to check whether their code performs as expected before formal testing. It can include unit testing, integration testing, and other quick checks to validate functionality on the fly. -
  • - -
  • - Exploratory Testing
    - Purpose: To uncover unexpected issues by testing the software in an unscripted manner. Exploratory testing allows testers to investigate the software's behavior outside of planned test cases, often discovering edge cases or flaws that structured tests may miss. -
  • - -
  • - Performance Testing
    - Purpose: To assess how the software performs under expected and extreme conditions. Performance testing evaluates response times, resource usage, and stability, often covering areas like load, stress, and scalability testing. This objective ensures the system can handle the demands it will face in production. -
  • - -
  • - Compliance Testing
    - Purpose: To confirm that the software adheres to regulatory, legal, and industry standards. Compliance testing ensures that the system meets external requirements, which may include accessibility, data privacy, and industry-specific standards. -
  • - -
  • - Security Testing
    - Purpose: To identify vulnerabilities and ensure the software is protected against unauthorized access and threats. Security testing checks for risks like data breaches, weak authentication, and exposure to known vulnerabilities, helping to safeguard sensitive information and user privacy. -
  • - -
  • - Compatibility Testing
    - Purpose: To verify that the software works across different environments, devices, and platforms. Compatibility testing ensures consistent functionality and appearance across browsers, operating systems, hardware configurations, and other setups. -
  • - -
  • - Acceptance Testing
    - Purpose: To determine if the software meets the end user's needs and expectations. Acceptance testing, often conducted by stakeholders or QA teams, validates that the software is usable and functional from a real-world perspective, acting as the final check before release. -
  • - -
  • - Documentation Testing
    - Purpose: To ensure that all documentation, guides, and user manuals are accurate and reflect the current software functionality. Documentation testing verifies that users have clear, up-to-date information for effective usage and troubleshooting. -
  • - -
  • - Usability Testing
    - Purpose: To confirm that the software is user-friendly and intuitive. Usability testing focuses on the ease of use, ensuring that end users can navigate and interact with the software without unnecessary friction, leading to a positive user experience. -
  • - -
- -

The Moasic Testbench is useful for any type of testing that can be - formulated as test routines testing RUTs. This certainly includes - verification, regression, development, exploratory testing. It will - include the portions of performance, compliance, security, compatibility, - and acceptance testing that fit the model of test routines and RUTs. Only - recently has can it be imagined that the Mosaic TestBench can be used with - documentation testing. However, it is now possible to fit an AI API into a - test routine, and turn a document into a RUT. Usability testing often - depends in other types of tests, so to this extent the Mosaic Testbench - can play a role. However, usability is often also in part feedback from - users. So short of putting users in the Matrix, this portion of usability - testing remains outside the domain of the Mosaic Testbench, though come to - think of it, the Mosaic Testbench could be used to reduce surveys to pass - fails.

- -

Each test objective will lead to writing tests of a different nature.

- - -

Unstructured Testing

- -

This section outlines some common approaches - to unstructured testing, often referred to - as black box testing. Black boxes are inherent - in even the most structured testing approaches, as at the lowest levels of - analysis, elements will always remain opaque. Even in the most highly - detailed test of logic possible, one that examines a RUT down to the - individual logic gates, each gate would be treated as a black box.

- -

Reference Output Based Testing

- -

In reference output based testing, an ordering - is assigned to the inputs for - the routine under test, as well as to - its outputs. Through this ordering the inputs - and outputs become vectors. Thus the routine under test is given - an input vector and it returns - an observed output vector.

- -

A Reference Model is then - given the same input vector, and then it - produces a reference output vector. The reference - output vector has the same component ordering as the - observed output vector. - -

The failure detection function then compares - each observed output vector with its corresponding reference output vector. If - they do not match, the test is deemed to have failed.

- -

It follows that in reference output based testing, the accuracy of the - test results depends solely on the accuracy of the Reference Model.

- -

When the implementation of the Reference Model is unrelated to the - routine under test, we tend to expect that the errors produced by the - Reference Model will be uncorrelated with those produced by the routine - under test, and thus not probable to coincide. This property will bias - test routines towards delivering false fails. As noted earlier, false fails are - likely to be caught as test fails are followed up with further - scrutiny. It follows that reference output based testing can potentially - deliver a high degree of accuracy even though the reference model is not - ideal.

- -

Property Check Testing

- -

Property Check Testing is an alternative to - reference output based testing. Here, rather than comparing each observed - output to a reference output, the observed output is validated against - known properties or expected characteristics.

- -

For example, given an integer as input, a function that correctly squares - this input will preserve the parity of the input, as an odd number squared - will be odd, and an even number squared will be even. The failure decider - can check this property for each test case, and if it does not hold, the - test case fails.

- -

Note for the square RUT test, this proposed property check is weak. Given - a uniform distribution, half the time an errant square will still have the - correct parity. There are stronger property checks that could be done for - squares, but the point here is one of illustration. A weak property check - would not recognize many failures, and thus be biased towards false pass - decisions. Those are the bad ones, as passing tests typically receive no - further scrutiny.

- -

Spot Checking

- -

In spot checking, the function under test is checked against one or two - input vectors. When using a black box approach, these are chosen at - random.

- -

Moving from zero to one is an finite relative change, i.e., running a - program for the first time requires that many moving parts work together, - parts that have never been tried before; hence, a tremendous amount is - learned about the logic and setup when the first test runs. Such a first - test is called a smoke test, a term that - has literal meaning in the field of electronics testing.

- -

Exhaustive Testing

- -

A test routine will potentially run multiple test cases against a given - RUT. If the RUT is a pure function, then per test case, a single test - vector will be given to the RUT, and a single output vector will be - returned. However, if the RUT is sequential in nature, for each test case - there will be a sequence of input vectors, and potentially a sequence of - output vectors.

- -

The set of possible inputs for a RUT, were members are either individual - vectors, or vector sequences, constitutes the input - space. Test coverage is typically given - as the proportion or inputs tested to the total in the input space, - reported as a percentage./p> - -

When the RUT is a pure function, the input space is an enumeration of all - possible input vectors. If the inputs include arbitrary long strings, then it - will not be possible to complete such an enumeration, the best that can - be done is to generate more and more inputs upon demand. -

- -

When the RUT has sequential behavior, achieving full coverage requires - giving the RUT every possible starting input, and then sequencing it to a - point of hitting a stop state or cycle state in every possible way. Again - if inputs can be arbitrarily long strings, such an enumeration can not be - completed. Furthermore, if the RUT state is encapsulated unseen in a black - box, it might be very difficult, or impossible, to detect when the state - has cycled.

- -

Exhaustive testing is said to have been - done when every single input in the input space has been tested. - An exhaustive test will have obtained 100% coverage, with no rounding - done in the coverage computation.

- -

Suppose that a fault appears at time t₀. Suppose there is a duration of - time of interest, Δ, that begins at or later than t₀. Suppose further - there exists a given test and test case that fails due to the fault, but - would not otherwise fail. Then a failure is - reproducible during Δ, if and only if the given test and test case - would fail if run at any time during Δ, and no matter how many times it is - run.

- -

For a RUT that is a pure function, this definition is the same as saying - the test case fails at the same input value every time during Δ, when - ideally is should have passed. For a sequential RUT, it is saying that the - same input vector sequence will always lead to a failure, when ideally it - would lead to a pass.

Although the same test routine is run with identical inputs, a failure might not be reproducible due to other sources of variability, for example:

  1. The contract made with the programmer, to use the exact same inputs for the exact same test routine, was broken.
  2. Use of uninitialized memory.
  3. Software updates or platform changes in between test runs during Δ.
  4. Green thread, or real thread, scheduling differences, whether done by the OS or by the interpreter.
  5. Using the system time, or another system parameter, as data.
  6. Race conditions.
  7. Getting values from a randomly seeded pseudo-random number generator.
  8. Reaching outside of the architecture model for values, for example using performance measures or timing events.
  9. A hardware fault that is sensitive to a myriad of possible environmental influences.

Exhaustive testing will find all failures that are reproducible. It might find failures that are not reproducible. The probability of witnessing non-reproducible failures will typically go up when using the technique of over testing, i.e., running even more than an exhaustive number of tests.

Structured Testing

Structured testing is a form of white box testing, where the tester examines the code being tested and applies various techniques to it to increase the efficiency of the testing.

The Need for Structured Testing

All types of black-box testing have a serious problem in that the search space for failures grows exponentially as the number of input bits grows. You have probably heard about this sort of thing before, but you might not appreciate just how severe the situation is. To illustrate, we will consider the simplest of programs, one that adds two numbers. When the RUT is a black box, the test routine only has access to the interface, so it appears like this:

    int8 sum(int8 a, int8 b){
      ...
    }

Here, two int8 values are being added, so an input test vector will have 16 bits. The result is also an int8, so an output vector will have 8 bits.

As the internals of the RUT are unknown, it could contain unexpected logic, like this:

    int8 sum(int8 a, int8 b){
      if(a == 248 && b == 224) return 5;
      else return a + b;
    }

A developer might not be writing malicious code when something like this appears; instead, the code might have been pulled from somewhere else and dropped in, and the special case might have been needed in that original context, perhaps on another machine. Perhaps the code was generated by an AI, or it is leftover debug logic. This example illustrates that testers are typically not expected to understand developer code. Though in this case the logic is obvious, there can be more obscure functions that testers cannot take the time to understand, which might exhibit similarly unexpected behavior.

As this is a black box, the numbers 248 and 224 are not known to the test writer. Therefore, the only unstructured testing approach that is guaranteed to find this failure is exhaustive testing.

Exhaustive testing is feasible here. An input test vector with 16 bits gives an input space of 65,536 points. Sixty-five thousand tests is trivial for a modern desktop. The full test will take about 100 microseconds, and in this time the test routine is guaranteed to find all reproducible failures. Note that in 50 microseconds, half of the input space will be covered, so there is a 0.5 probability of finding a single failure within that time. Generally, half the total time corresponds to a 0.5 probability of finding a single failure.
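
To make this concrete, here is a minimal sketch of such an exhaustive test in C++. The RUT name sum and the reference model, a wrapping 8-bit add, are assumptions made for illustration:

    #include <cstdint>
    #include <cstdio>

    int8_t sum(int8_t a, int8_t b);  // the RUT, linked in from elsewhere

    int main(){
      long failures = 0;
      // walk the entire 16-bit input space: every (a, b) pair
      for(int a = -128; a <= 127; ++a){
        for(int b = -128; b <= 127; ++b){
          int8_t expected = (int8_t)(a + b);  // reference model: wrapping add
          int8_t actual   = sum((int8_t)a, (int8_t)b);
          if(actual != expected){
            ++failures;
            printf("fail: sum(%d, %d) = %d, expected %d\n", a, b, actual, expected);
          }
        }
      }
      printf("failures: %ld of 65536 cases\n", failures);
      return failures != 0;
    }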

Now, suppose that instead of looking for a reproducible fault, we have:

    int8 sum(int8 a, int8 b){
      if(a == 255 * rand() && b == 224 * rand()) return 5;
      else return a + b;
    }

In this case, to find the fault, the test routine must guess the values of two independent 8-bit random variables from a uniform distribution. As they are independent, we can combine them and note that the test must guess a 16-bit value. If we consider an "exhaustive" test, the tester will make 2^16 tries. Hence, the probability of finding this failure is:

    1 - (1 - 2^-16)^(2^16) = 0.6321...

A small adjustment to the above equation is necessary to make it precise, because sometimes 5 is the correct answer. Note this is the familiar limit 1 - (1 - 1/n)^n → 1 - 1/e ≈ 0.632 as n grows. Thus, with 2^16 test cases, there will be certainty (a probability of 1.0) of finding all reproducible errors and about a 0.63 probability of finding a single random fault. The two probabilities are not as far apart as one might expect, given that the failure is "jumping around."

Now, let's go back to the reproducible error case, but this time, suppose we are working with an int16:

    int16 sum(int16 a, int16 b){
      ...
    }

Now an input vector has 32 bits, giving an input space with 4,294,967,296 points. At the same rate as before, our computer will require about 7 seconds of compute time for this. Adding overhead, let's call it 10 seconds. Testing would be barely practical if it took 10 seconds to test such a simple RUT as this, but perhaps we would invest in a faster computer?

    int32 sum(int32 a, int32 b){
      ...
    }

Now, suppose we are adding 32-bit numbers. The input space now has 18,446,744,073,709,551,616 points. Compute time, without overhead, will be about 900 years! Suffice it to say, we have discovered that testing the addition of two 32-bit numbers exhaustively is impractical. Even if we break the problem into 1,000 pieces on different processors and use a state-of-the-art server farm, it would still take months and cost a significant amount. What will you tell the boss?

But wait! What if we move to 64-bit computing?

    int64 sum(int64 a, int64 b){
      ...
    }

The input space now has:

    340,282,366,920,938,463,463,374,607,431,768,211,456

points. That's about 340 undecillion. Compute time is about 16 sextillion years (1.6 x 10^22), roughly a trillion times the age of the universe. Even with all the processing power on Earth, and even if you are willing to accept a probability of only 0.1 of finding the failure, it would still take far longer than the age of the universe to test a function as simple as adding two numbers. Clearly, there must be a better approach.

Summary Table

    Bits      Input Space     Compute Time
    8 bits    6.55 x 10^4     100 μs
    16 bits   4.29 x 10^9     ~7 s
    32 bits   1.84 x 10^19    ~900 years
    64 bits   3.40 x 10^38    ~10^12 times the age of the universe

A typical response from people when they see this table is that they knew it went up fast, but did not know it went up this fast. It is also important to note that there is a one-to-one relationship between the percentage of the exhaustive-testing time spent and the percentage of coverage obtained: half the time gives 50 percent coverage. In the last row of the table, keeping the test time reasonable would put the coverage somewhere around 10^-18 percent or less. At that level of coverage there is really no reason to test. Hence, this table is not limited to speaking about exhaustive testing; rather, it speaks to black box testing in general.

Informed Spot Checking

In white box testing, we take the opposite approach to black box testing. The test writer does look at the code implementation, and must understand how to read the code. Take our 64-bit adder example from the prior section; in this section we will apply to it a white box technique known as Informed Spot Checking.

This is the prior example as a black box:

    int64 sum(int64 a, int64 b){
      ...
    }

And here it is as a white box:

    int64 sum(int64 a, int64 b){
      if(a == 5717710 && b == 27) return 5;
      else return a + b;
    }

When following the approach of Informed Spot Checking, the tester examines the code and sees there is a special case for a = 5717710 and b = 27, which becomes the first test case. There is also a special case for when the sum exceeds the 64-bit integer range, both in the positive and negative directions; these become two more test cases. Finally, the tester includes a few additional cases that are not edge cases.
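
As a concrete sketch, the spot-check suite might look as follows in C++. The expected wrap-around behavior on overflow is an assumption made for illustration; the specification would say what is actually required:

    #include <cstdint>
    #include <cstdio>

    int64_t sum(int64_t a, int64_t b);  // the RUT

    struct Case { int64_t a, b, expected; };

    int main(){
      Case cases[] = {
        { 5717710, 27, 5717737 },      // the special case seen in the code
        { INT64_MAX, 1, INT64_MIN },   // positive overflow, assuming wrap-around
        { INT64_MIN, -1, INT64_MAX },  // negative overflow, same assumption
        { 1234, 4321, 5555 },          // an ordinary mid-range value
        { 0, 0, 0 }                    // zero, a notorious edge case
      };
      int failures = 0;
      for(Case c : cases)
        if(sum(c.a, c.b) != c.expected) ++failures;
      printf("failures: %d of 5 cases\n", failures);
      return failures != 0;
    }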

Thus, by using white box testing instead of black box testing, the tester finds all the failures with just 4 or so test cases instead of

    340,282,366,920,938,463,463,374,607,431,768,211,456

cases. Quite a savings, eh?

There are notorious edge cases in software, and these can often be seen by looking at the RUT. Zeros, and inputs that lead to index values just off the end of arrays, are common ones. Checking a middle value along with the edge cases is often an effective approach for finding failures.

There is an underlying mechanism at play here. Note that it takes two points to determine a line. In Fourier analysis, it takes two samples per period of the highest frequency component to determine an entire waveform. Code also has patterns, patterns that are disjoint at edge cases. Hence, if a piece of code runs without failures for both edge cases and spot check values in between, it will often run without failures over an entire domain of values. This effect explains why ad hoc testing has led to so much relatively failure-free code.

Informed Spot Checking is especially valuable in early development, as it provides useful insights with minimal investment. In the early development stage, making more investment in test code is unwise due to the code being in flux. Test work is likely to get ripped up and replaced.

The idea of test work being ripped up and replaced highlights a drawback of white box testing: analysis of code can become stale when implementations change. However, due to the explosion in the size of the input space with even a modest number of inputs, white box testing is necessary if there is to be much commitment to producing reliable software or hardware.

Refactoring the RUT

Refactoring a RUT to make it more testable can be a powerful method for turning testing problems that are exponentially hard due to state variables, or very difficult to debug due to random variables, into problems that are linearly hard. According to this method, the tester is encouraged to examine the RUT to make the testing problem easier.

By reconstructing the RUT I mean that we refactor the code to bring any random variables or state variables to the interface, where they are then treated as inputs and outputs.

If placing state variables on the interface is adopted as a discipline by the developers, reconstruction will not be needed in the test phase; or if it is needed, white box testers will see this, and it will be a bug that has been caught. Otherwise, reconstruction leads to two versions of a routine: one that has been reconstructed, and one that has not. The leverage gained on the testing problem by reconstructing a routine typically more than outweighs the extra verification problem of comparing the before and after routines.

As an example, consider our adder function with a random fault. As we know from prior analysis, changing the fault to a random number makes testing harder, but perhaps more importantly, it makes the fault nearly impossible to debug, as the tester cannot hand it to the developer and say, 'it fails in this case'.

    int64 sum(int64 a, int64 b){
      if( a == (5717710 * rand()) && b == (27 * rand()) ) return 5;
      else return a + b;
    }

The tester refactors this function as:

    int64 sum(int64 a, int64 b, int64 a0 = 5717710*rand(), int64 b0 = 27*rand()){
      if( a == a0 && b == b0 ) return 5;
      else return a + b;
    }

Here a0 and b0 are added to the interface as optional arguments. During testing their values will be supplied; during production the defaults will be used. Thus, we have broken the one test problem into two: the question of whether sum works, and the question of whether the random number generation works.
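
As a sketch of how the test side uses this, continuing the pseudocode above with its C++-style optional arguments:

    // the test chooses a0 and b0, so the fault location is known and fixed
    bool test_sum(){
      int64 a0 = 1000, b0 = 2000;
      bool ordinary = ( sum(1, 2, a0, b0) == 3 );           // passes
      bool at_fault = ( sum(1000, 2000, a0, b0) == 3000 );  // fails, reproducibly
      return ordinary && at_fault;
    }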

Failures in sum found during testing are now reproducible. If the tester employs informed spot checking, the failure will be found with few tests, and the point in the input space where the failure occurs can be reported to development and used for debugging.

Here is a function that keeps state variables between calls.

    int state = 0;
    int call_count = 0;
    void state_machine(int input) {
        int choice = (input >> call_count) & 1;
        switch (state) {
            case 0:
                printf("State 0: Initializing...\n");
                state = choice ? 0 : 1;
                break;
            case 1:
                printf("State 1: Processing Path A...\n");
                state = choice ? 0 : 2;
                break;
            case 2:
                printf("State 2: Processing Path B...\n");
                state = choice ? 0 : 3;
                break;
        }
        call_count++;
    }

The Mosaic Testbench makes standard out available to the test routine in an array, so we can capture and examine the printed output while testing this RUT. Because of the state variables, state and call_count, this routine will behave differently each time it is called. A black box test will have a large number of input vector sequences to try. The failure occurs in the call made after being in state 2, when the count is such that the choice is to go to state 3.

    int state = 0;
    int call_count = 0;
    void state_machine(int input, int state0 = state, int call_count0 = call_count) {
        int choice = (input >> call_count0) & 1;
        switch (state0) {
            case 0:
                printf("State 0: Initializing...\n");
                state = choice ? 0 : 1;
                break;
            case 1:
                printf("State 1: Processing Path A...\n");
                state = choice ? 0 : 2;
                break;
            case 2:
                printf("State 2: Processing Path B...\n");
                state = choice ? 0 : 3;
                break;
        }
        call_count = call_count0 + 1;
    }

Here the test routine supplies state0 and call_count0 as inputs. The test routine treats state and call_count as outputs, so this is then a pure function. As a pure function it is a much easier testing problem. Now, instead of a combinatorially hard problem involving input sequences, the test routine can visit each of the three states, and set the input such that each visits its two possible next states. That is six test cases to see everything that this function is capable of doing.

Any time the RUT is refactored in the testing phase, it raises the question of whether the refactored code maintains the required functionality. This becomes another verification problem, which might or might not be verified through testing. One way to manage this issue is to take the refactored code back to the developers to have them adopt it into the project. Then it becomes the original code.

Bottom Up Testing

When a function corresponds directly to CPU instructions, as is the case for the + operator, we typically trust that it will give the right answer. The same can be said for the call and return dynamic. Unless we are working on a new compiler, it is typically assumed that this works; application program test suites do not include tests to check whether calls work.

The reason for this trust is that CPU instructions and function calls are already extensively tested, both directly by the manufacturers and through widespread use. Though this trust is not always warranted, as in the case of the Intel Pentium divider, which had failure cases.

We can decompose a testing problem into trusted and untrusted components. We call routines that are trusted building blocks; we then use the building blocks to build up larger routines, and then test those to create larger building blocks. At the end we will have built up a trustworthy program.

This approach parallels what developers do when they write programs. They start with primitive routines that come with the language or from libraries, and then they compose these to write custom functions.

The following is an expansion of our adder example, for creating and testing an adder for 256 bit numbers. For purposes of presentation, we will refer to int256 as a type that corresponds to an array of 32 bytes, and uint1 as a 1 bit unsigned integer, i.e. 0 or 1.

    {uint1, uint64} full_adder(uint64 a, uint64 b, uint1 c0) {
        uint64 partial_sum = a + b;
        uint64 sum = partial_sum + c0;
        uint1 carry_out = (partial_sum < a) || (sum < partial_sum);
        return {carry_out, sum};
    }

Here is a 256 bit adder made from 64 bit adders.

    {uint1, int256} add_256(int256 a, int256 b) {
        uint1 carry_in = 0;
        int64 sum_parts[4];  // array to store each 64-bit segment of the sum

        for i = 0 to 3 {
            // get the i-th 64-bit segments of a and b
            int64 a_part = (a >> (i * 64)) & 0xFFFFFFFFFFFFFFFF;
            int64 b_part = (b >> (i * 64)) & 0xFFFFFFFFFFFFFFFF;

            // perform the full addition on each 64-bit part
            {carry_out, sum_parts[i]} = full_adder(a_part, b_part, carry_in);

            // update carry-in for the next 64-bit segment
            carry_in = carry_out;
        }

        int256 sum = 0;
        for i = 0 to 3 {
            sum |= (sum_parts[i] << (i * 64));
        }

        return {carry_in, sum};
    }

According to the bottom up technique, we first test the full_adder, which is not a difficult testing problem. It employs well known, trusted operations, and has a couple of interesting special case conditions. Given the numeric nature of this code, these special case conditions are probably better verified by proof than by testing, but they can be tested.
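
A sketch of such tests, assuming the pseudocode's {carry, sum} pair is rendered as a small C++ struct:

    #include <cstdint>
    struct AddResult { uint8_t carry; uint64_t sum; };
    AddResult full_adder(uint64_t a, uint64_t b, uint8_t c0);  // the RUT

    bool test_full_adder(){
      AddResult r1 = full_adder(1, 2, 0);                    // ordinary case
      AddResult r2 = full_adder(UINT64_MAX, 1, 0);           // carry out of a + b
      AddResult r3 = full_adder(UINT64_MAX, 0, 1);           // carry out of adding c0
      AddResult r4 = full_adder(UINT64_MAX, UINT64_MAX, 1);  // both carry sources
      return r1.carry == 0 && r1.sum == 3
          && r2.carry == 1 && r2.sum == 0
          && r3.carry == 1 && r3.sum == 0
          && r4.carry == 1 && r4.sum == UINT64_MAX;
    }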

Once the full_adder can be trusted, testing add_256 reduces to checking that the various 64 bit parts are extracted and then packed correctly, and are not, say, offset by one, and that the carries are properly communicated during the add.
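
A sketch of such checks, in the same pseudocode as above; the assert notation is assumed:

    {c, s} = add_256(~0, 1);             // all ones plus one: the carry must
    assert( c == 1 && s == 0 );          // ripple through every segment

    {c, s} = add_256(1 << 63, 1 << 63);  // carry across the segment 0/1 boundary
    assert( c == 0 && s == (1 << 64) );

    {c, s} = add_256(1 << 64, 2 << 64);  // arithmetic within segment 1 catches
    assert( c == 0 && s == (3 << 64) );  // parts packed at the wrong offset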

Note this test also trusts the fact that ripple carry addition is a valid algorithm for assembling the pieces. Thus there is a new verification problem, that of the algorithm. In this case, ripple carry addition is already a trusted algorithm.

Testing of add_256 could be further simplified with refactoring, by moving the loop control variables to the interface, along with the carry_in and carry_out. As i is recycled, it would become two variables, say i and j. Once the loop control variables are on the interface it is straightforward to test the packing. Once the carries are on the interface it is straightforward to test the carries.
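
A sketch of that refactor, again in the document's pseudocode; the name add_256_step is hypothetical. With one loop step exposed on the interface, extraction, the carry, and packing can each be checked one segment at a time:

    {uint1, int64} add_256_step(int256 a, int256 b, int i, uint1 carry_in){
        int64 a_part = (a >> (i * 64)) & 0xFFFFFFFFFFFFFFFF;
        int64 b_part = (b >> (i * 64)) & 0xFFFFFFFFFFFFFFFF;
        return full_adder(a_part, b_part, carry_in);
    }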

In general, all programs and circuits can be conceptualized as functional units, channels, and protocols. A test that shows that these work as specified shifts the test problem from the RUT to the specification.

Adding to the code

It is a common practice to add property checks to the code for gathering data about failures or other potential problems. These then write to log files, or even send messages back to the code maintainers. By doing this the testers benefit from actual use of the product as though it were a test run. When failures are found, such code might then trigger remedial or recovery actions.
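
A minimal sketch of such a shipped property check, continuing the adder pseudocode; the sub_256 routine and the log helper are hypothetical:

    {uint1, int256} add_256_checked(int256 a, int256 b){
        {uint1 c, int256 s} = add_256(a, b);
        if( sub_256(s, b) != a )                  // property: (a + b) - b == a
            log("add_256 property violation", a, b, s);
        return {c, s};
    }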

About Reference Outputs and Reference Properties

When testing during development, reference outputs often come from the developers or testers themselves. They know what they expect from the routines, but they do not know if the code will meet these expectations, so they write tests. Typically, they try to imagine the hardest possible cases. However, sometimes a young developer avoids testing challenging cases to sidestep the risk of failures; this is, of course, a poor approach that can lead to undetected issues.

Often, specification authors provide reference outputs or extensive test suites that must be passed to achieve certification. Architects also contribute by creating multi-level specifications: for the entire program, for the largest components, and for the communication protocols between components. These specifications often serve as high-quality reference outputs and property checks that can be applied to the model during testing. The goal of developers and testers is to meet these specifications, making failures directly relevant to the development process and program design.

Experts in a specific area sometimes provide test data, maintaining a database of reference data as a resource for validating outputs. For some types of code, experts also supply property checks, which evaluate whether outputs satisfy essential properties rather than match specific values. Depending on the domain, these properties can be an important aspect of the testing process.
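
As a sketch of the difference, here is a property check for a sort routine: it evaluates the shape of the answer rather than a specific value. The name my_sort is a stand-in:

    #include <algorithm>
    #include <vector>

    std::vector<int> my_sort(std::vector<int> v);  // the RUT, a stand-in name

    bool sort_properties_hold(std::vector<int> input){
      std::vector<int> output = my_sort(input);
      std::sort(input.begin(), input.end());              // trusted reference ordering
      return std::is_sorted(output.begin(), output.end()) // property: output is ordered
          && output == input;                             // property: same multiset of values
    }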

Each time a bug is found, a test should be created to capture a failure related to that bug. Ideally, such tests are written with minimal implementation-specific details so they remain relevant even after code changes. These tests are then added to a regression testing suite, ensuring that future changes do not reintroduce the same issues.

For applications involving multi-precision arithmetic, such as the earlier adder example, reference data is often sourced from another established multi-precision library, whether an open-source or a commercial product. The assumption is that an existing product will be more reliable than a newly developed one, and since it is implemented differently, its errors are likely to be uncorrelated. This is competitive testing, an aspect of compatibility testing, here being used for other objectives. In the limit, as the RUT matures, this approach will tend to identify bugs in the reference data from the other company as often as it does in the RUT, which might be an interesting effect.
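
A sketch of this comparison for the 64-bit full adder, using the compiler's native 128-bit arithmetic as the independently implemented reference (__int128 is a GCC/Clang extension, an assumption about the toolchain):

    #include <cstdint>
    struct AddResult { uint8_t carry; uint64_t sum; };
    AddResult full_adder(uint64_t a, uint64_t b, uint8_t c0);  // the RUT

    bool agrees_with_reference(uint64_t a, uint64_t b){
      AddResult r = full_adder(a, b, 0);
      unsigned __int128 ref = (unsigned __int128)a + b;  // independent implementation
      return r.sum == (uint64_t)ref && r.carry == (uint8_t)(ref >> 64);
    }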

In some cases, reference data comes from historical sources or existing systems. When upgrading or replacing a legacy system, historical data serves as a benchmark for comparison. Similarly, industry standards and compliance datasets, particularly from organizations like IEEE, NIST, or ISO, provide reliable reference points for applications requiring standardized outputs. Compliance-driven tests are often required for certification or regulatory approval in fields such as finance, healthcare, and aerospace.

For cases requiring many inputs without needing specific reference values, random number generators can provide extensive test data. Examples include comparative testing and property checking. Random number generators can also be configured to concentrate cases in specific areas of the input domain that for some reason concern the testers.
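
A sketch of such a generator, concentrating operands near the overflow boundary; the fixed seed keeps any failure found reproducible:

    #include <cstdint>
    #include <random>

    std::mt19937_64 gen(12345);  // fixed seed: reruns see the same sequence
    uint64_t near_overflow(){
      std::uniform_int_distribution<uint64_t> offset(0, 1023);
      return UINT64_MAX - offset(gen);  // within 1024 of the top of the range
    }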

Customer and user feedback sometimes uncovers additional test cases, especially when dealing with complex or evolving software. Feedback reveals edge cases or expected behaviors that developers and testers may not have anticipated, allowing teams to create reference points for new test cases that cover real-world use and address user needs.

Conclusion

If you are a typical tester or developer reading through the previous list, you might feel a bit disappointed. Unless you work in a specialized area, are attempting to create a compatible product, or need to exercise the hardware, much of that list might seem inapplicable. For many developers, the most applicable advice remains: "During development, reference outputs often come from the developers or testers themselves." I apologize if this seems limiting, but consider this: the reason we run programs is to generate the very data we're looking for. If that data were easily available, we wouldn't need the program.

In many ways, testing is about making developers and testers the first users of the product. All products will have bugs; it's far better for experts to encounter these issues first.

Testing also facilitates communication among project members. Are the architects, developers, and testers all on the same page about how the product should work? The only way to find out is to run what has been built and observe it in action. For this, we need test cases.

This circular problem, finding the data that our program should generate in order to test the program itself, illustrates a fundamental limitation in software testing. We encountered this in the discussion of unstructured, black-box testing: as soon as we open the box to inspect the code, we are no longer just testing it, but reasoning about it, and even verifying it formally.

This, perhaps, hints at a way forward. Our program is a restatement of the specification in another language. Verification, then, is an equivalence check. We can run examples to demonstrate equivalence, but black-box testing alone will have limited impact. Alternatively, we can examine our code and try to prove that it matches the specification. Though challenging, this approach is far more feasible than waiting many times the age of the universe to confirm our solution through black box testing.

Think of testing as a reasoning problem. Explain why the routine works and how it contributes to meeting the specification. Work from the top down: if the high-level components behave correctly, the program will meet the specification. That's the first step. Then explain why the breakdown of those top-level components ensures correct behavior. Continue this process, and then use tests to validate each link in this chain of reasoning. In this way, you can generate meaningful reference values.

- - - - diff --git "a/document\360\237\226\211/LICENSE.txt" "b/document\360\237\226\211/LICENSE.txt" deleted file mode 120000 index 4ab4373..0000000 --- "a/document\360\237\226\211/LICENSE.txt" +++ /dev/null @@ -1 +0,0 @@ -../LICENSE.txt \ No newline at end of file diff --git "a/document\360\237\226\211/README.txt" "b/document\360\237\226\211/README.txt" deleted file mode 120000 index ecfa029..0000000 --- "a/document\360\237\226\211/README.txt" +++ /dev/null @@ -1 +0,0 @@ -../README.txt \ No newline at end of file diff --git "a/document\360\237\226\211/example_proxy.java" "b/document\360\237\226\211/example_proxy.java" deleted file mode 100644 index 7c7d0ad..0000000 --- "a/document\360\237\226\211/example_proxy.java" +++ /dev/null @@ -1,26 +0,0 @@ -// 1. - -Mosaic_AllMethodsPublicProxy proxy = new Mosaic_AllMethodsPublicProxy(SomeClass.class); - -String methodName = "compute"; -Class returnType = int.class; -Object[] args = {42, 15}; - -Object result = proxy.invoke(someInstance, methodName, returnType, args); -System.out.println(result); - - -// 2. - -Method method = SomeClass.class.getDeclaredMethod("compute", int.class, int.class); -FunctionSignature sigFromReflection = new FunctionSignature(method); - -FunctionSignature sigFromInvocation = new FunctionSignature( - "com.example.SomeClass", - "compute", - int.class, - new Object[]{42, 15} -); - -System.out.println(sigFromReflection.equals(sigFromInvocation)); // Should be true - diff --git "a/document\360\237\226\211/see_also" "b/document\360\237\226\211/see_also" deleted file mode 120000 index 02eb81c..0000000 --- "a/document\360\237\226\211/see_also" +++ /dev/null @@ -1 +0,0 @@ -../tool_shared/third_party/RT-project-share/document🖉/ \ No newline at end of file diff --git "a/document\360\237\226\211/todo.txt" "b/document\360\237\226\211/todo.txt" deleted file mode 100644 index c71e6ea..0000000 --- "a/document\360\237\226\211/todo.txt" +++ /dev/null @@ -1,63 +0,0 @@ - -1. More languages support, notably nodejs. - -2. This message: - - Running Test_File_0...Structural problem: unpack_file_path_0 does not accept a single IO argument. - Error: unpack_file_path_0 has an invalid structure. - - Perhaps only the second one, getting rid of 'Error:' - - "Bad type signature for method: unpack_file_path_0 does not accept a single IO argument." - -3. TestBench -> Testbench perhaps? - -4. fix emacs.el so that jdbx doesn't always start with Test_Util. (note the - prior start can be found with M-x p - - -5. should include a tool for aggregating test suite runs - FYI, I'm using - -6. need an example .bashrc for setting the prompt now that I removed references -to the 'resource' project and plan to deprecate it. - -7. should check stdin for unused input and report error also. - -8. move 'unlock_class` from Ariadne tests to Mosaic_Util. - -9. consider adding Maven for the third party tools dependencies. - -10. really should move the developer/release 'install' script to the bespoke🖉/env script - -2024-12-10T14:25:40Z - - Gives an error, but still passes? Is this right? - ... Running Test_Testbench - Expected output: Structural problem message for dummy_invalid_return_method. - Structural problem: dummy_invalid_return_method does not return Boolean. - Test_Testbench Total tests run: 3 - Test_Testbench Total tests passed: 3 - Test_Testbench Total tests failed: 0 - -2024-12-13T02:48:08Z - - Java has a love-hate-relationship with reflections ability to access - private methods; however, access is needed for white box testing. 
It - might be in the future for this feature to work that the Mosaic - project will have to expanded into the same module as that being - tested, rather than having its jar file accessed through the class - path. - - -2024-12-16T10:47:06Z - - FunctionSignature used with AllMethodsPublic currently does not - include the return type. It needs to have that. - -2024-12-20T06:09:38Z - - For Mosaic_Dispatcher, might be best to ditch the map and do lookup - to get the handle upon each call to a method, as we already have to - do the lookup to get the information for constructing the signature - for lookup. diff --git a/env_administrator b/env_administrator new file mode 100644 index 0000000..2ff06ed --- /dev/null +++ b/env_administrator @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +script_afp=$(realpath "${BASH_SOURCE[0]}") +if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then + echo "$script_afp:: This script must be sourced, not executed." + exit 1 +fi + +source tool_shared/bespoke🖉/env +source tool🖉/env $@ + diff --git "a/env_administrator\360\237\226\211" "b/env_administrator\360\237\226\211" deleted file mode 100644 index 2ff06ed..0000000 --- "a/env_administrator\360\237\226\211" +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") -if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then - echo "$script_afp:: This script must be sourced, not executed." - exit 1 -fi - -source tool_shared/bespoke🖉/env -source tool🖉/env $@ - diff --git a/tester/document/#build_run_transcript_v1.1.txt# b/tester/document/#build_run_transcript_v1.1.txt# new file mode 100644 index 0000000..6bfa707 --- /dev/null +++ b/tester/document/#build_run_transcript_v1.1.txt# @@ -0,0 +1,75 @@ +This shows all tests passing. + +Tests named `test_failure_` should fail. We need to know that the `TestBench` +can fail tests, so this is part of testing the `TestBench`. + +Staring the environment: + +2024-11-08T07:41:48Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer§ +> bash + +2024-11-08T07:41:51Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer§ +> cd Mosaic + +2024-11-08T07:41:54Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ +> . env_tester +REPO_HOME /var/user_data/Thomas-developer/Mosaic +PROJECT Mosaic +ENV tool_shared/bespoke/env +ENV tester/tool/env + +2024-11-08T07:42:04Z[Mosaic_tester] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> emacs & + +Running the tests: + +2024-11-08T09:58:40Z[Mosaic_tester] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> clean_build_directories ++ cd /var/user_data/Thomas-developer/Mosaic/tester ++ rm -r scratchpad/Test0.class scratchpad/Test_IO.class 'scratchpad/Test_MockClass_0$TestSuite.class' scratchpad/Test_MockClass_0.class scratchpad/Test_Testbench.class scratchpad/Test_Util.class ++ rm jvm/Test_Mosaic.jar ++ rm shell/Test0 shell/Test_IO shell/test_log.txt shell/Test_MockClass_0 shell/Test_Testbench shell/Test_Util ++ set +x +clean_build_directories done. + +2024-11-08T09:58:46Z[Mosaic_tester] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> make +Compiling files... ++ cd /var/user_data/Thomas-developer/Mosaic/tester ++ javac -g -d scratchpad javac/Test0.java javac/Test_IO.java javac/Test_MockClass_0.java javac/Test_Testbench.java javac/Test_Util.java ++ jar cf jvm/Test_Mosaic.jar -C scratchpad . ++ set +x +Creating shell wrappers... +tester/tool/make done. 
+ +2024-11-08T09:58:50Z[Mosaic_tester] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> run_tests +Running Test0...Test0 passed +Running Test_Util...Test_Util passed +Running Test_IO...Test_IO passed +Running Test_Testbench...Expected output: Structural problem message for dummy_invalid_return_method. +Structural problem: dummy_invalid_return_method does not return Boolean. +Test_Testbench Total tests run: 3 +Test_Testbench Total tests passed: 3 +Test_Testbench Total tests failed: 0 + +Running Test_MockClass_0...Test failed: 'test_failure_0' reported failure. +Structural problem: test_failure_1 does not return Boolean. +Error: test_failure_1 has an invalid structure. +Test failed: 'test_failure_2' threw an exception: java.lang.reflect.InvocationTargetException +Test failed: 'test_failure_3' produced extraneous stdout. +Test failed: 'test_failure_4' produced extraneous stderr. +Total tests run: 9 +Total tests passed: 4 +Total tests failed: 5 + +2024-11-08T09:58:55Z[Mosaic_tester] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> diff --git a/tester/document/Writing a test.txt b/tester/document/Writing a test.txt new file mode 100644 index 0000000..d2d02ed --- /dev/null +++ b/tester/document/Writing a test.txt @@ -0,0 +1,50 @@ + +I did not use Mosaic to test itself, although Test_MockClass_0 comes close. + +TestMockClass has the general form of a test that uses Mosaic, though MockClass +itself does not exist. This general form: + +*. For block testing there conventionally be a correspondence between + The test classes and the a class being tested, so each test class will + be named: + + `Test__'. + + Typically the lowest `number` will be zero, and it will correspond to + smoke tests. + +* A `Test__' class will have inside of it another class + called `TestSuite`. By convention each method in this class is a test routine. For block + testing a test routine will has a correspondence to the method being tested, + and has the name: + + `test__`. + + This convention is not always followed, no that in `Test_MackClass_0` you will + notice that tests are named after the expected results rather than a method + that is being tested. + + Test routines can run a number of tests on a RUT, each of which is referred to + as a test case. So we have this hierarchy: + + `Test__' > `TestSuite` > test_routine > test_case + +*. The main call for a Test class will parse arguments and options, setup + the testing environment, make a `TestSuite` object, pass said object to + the `TestBench`, then take the return value from the `Testbench`, and set + the return value from the test. + +* A test routines will return `true` if the test passes. Any other return + value, any uncaught exception, or anything left on the stdout or stderr + will cause the test to be interpreted as a failure. (On the todo list is + an item to make unused stdin an error as well.) + +* A test reoutine (nor the contained test cases) should not themselves print + any messages. Generally it has always been this way, even before the Testbench + redirected and the i/o streams. Rather the test should simply return true + for a pass. This is because in testing we are looking for function failures, + and not for faults. The fault will be searched for later in the debugger. + + If a test routine has an internal error, such that the routine itself + has a problem (not the RUT it is testing), this can be put in a log + entry. See the Mosaic_Util for the log methods. 
diff --git a/tester/document/build_run_transcript_2024-12-20T06:09:38Z.txt b/tester/document/build_run_transcript_2024-12-20T06:09:38Z.txt new file mode 100644 index 0000000..689546b --- /dev/null +++ b/tester/document/build_run_transcript_2024-12-20T06:09:38Z.txt @@ -0,0 +1,220 @@ +2024-12-20T06:09:38Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> clean; make; run ++ cd /var/user_data/Thomas-developer/Mosaic/tester ++ rm_na log/log.txt +rm_na: cannot remove 'log/log.txt': No such file or directory ++ rm_na -r scratchpad/Dispatcher_0.class scratchpad/Dispatcher_1.class scratchpad/Dispatcher_2.class scratchpad/Dispatcher_3.class scratchpad/IO.class 'scratchpad/IsPrimitive$TestSuite$1TestEnum.class' 'scratchpad/IsPrimitive$TestSuite.class' scratchpad/IsPrimitive.class 'scratchpad/Logger$TestSuite.class' scratchpad/Logger.class 'scratchpad/MockClass_0$TestSuite.class' scratchpad/MockClass_0.class scratchpad/smoke.class scratchpad/Testbench.class scratchpad/tester scratchpad/Util.class ++ rm_na jvm/Dispatcher_0 jvm/Dispatcher_1 jvm/Dispatcher_2 jvm/Dispatcher_3 jvm/IO jvm/IsPrimitive jvm/Logger jvm/MockClass_0 jvm/smoke jvm/Testbench jvm/Util ++ rm_na jdwp_server/Dispatcher_0 jdwp_server/Dispatcher_1 jdwp_server/Dispatcher_2 jdwp_server/Dispatcher_3 jdwp_server/IO jdwp_server/IsPrimitive jdwp_server/Logger jdwp_server/MockClass_0 jdwp_server/smoke jdwp_server/Testbench jdwp_server/Util ++ set +x +clean done. +++ realpath /var/user_data/Thomas-developer/Mosaic/tester/tool🖉/make ++ script_afp=/var/user_data/Thomas-developer/Mosaic/tester/tool🖉/make ++ env_must_be=tester/tool🖉/env ++ '[' tester/tool🖉/env '!=' tester/tool🖉/env ']' ++ echo 'Compiling files...' +Compiling files... ++ set -x ++ cd /var/user_data/Thomas-developer/Mosaic/tester ++ javac -g -d scratchpad javac🖉/TestClasses_0.java javac🖉/TestClasses_1.java javac🖉/TestClasses_2.java +++ list ++ list='smoke Logger Util IO Testbench MockClass_0 IsPrimitive Dispatcher_0 Dispatcher_1 Dispatcher_2 Dispatcher_3' ++ for file in $list ++ javac -g -d scratchpad javac🖉/smoke.java ++ for file in $list ++ javac -g -d scratchpad javac🖉/Logger.java ++ for file in $list ++ javac -g -d scratchpad javac🖉/Util.java ++ for file in $list ++ javac -g -d scratchpad javac🖉/IO.java ++ for file in $list ++ javac -g -d scratchpad javac🖉/Testbench.java ++ for file in $list ++ javac -g -d scratchpad javac🖉/MockClass_0.java ++ for file in $list ++ javac -g -d scratchpad javac🖉/IsPrimitive.java ++ for file in $list ++ javac -g -d scratchpad javac🖉/Dispatcher_0.java ++ for file in $list ++ javac -g -d scratchpad javac🖉/Dispatcher_1.java ++ for file in $list ++ javac -g -d scratchpad javac🖉/Dispatcher_2.java ++ for file in $list ++ javac -g -d scratchpad javac🖉/Dispatcher_3.java ++ set +x +Making jvm scripts ... +Making jdwp debug server scripts... +tester/tool🖉/make done. +list: smoke Logger Util IO Testbench MockClass_0 IsPrimitive Dispatcher_0 Dispatcher_1 Dispatcher_2 Dispatcher_3 + +... Running smoke +Test0 passed + +... 
Running Logger +Exception in thread "main" java.lang.NoClassDefFoundError: org/slf4j/LoggerFactory + at com.ReasoningTechnology.Mosaic.Mosaic_Logger.(Mosaic_Logger.java:12) + at Logger$TestSuite.smoke_test_logging(Logger.java:9) + at Logger.main(Logger.java:21) +Caused by: java.lang.ClassNotFoundException: org.slf4j.LoggerFactory + at java.base/jdk.internal.loader.BuiltinClassLoader.loadClass(BuiltinClassLoader.java:641) + at java.base/jdk.internal.loader.ClassLoaders$AppClassLoader.loadClass(ClassLoaders.java:188) + at java.base/java.lang.ClassLoader.loadClass(ClassLoader.java:528) + ... 3 more + +... Running Util +Util passed + +... Running IO +IO passed + +... Running Testbench +Expected output: Structural problem message for dummy_invalid_return_method. +Structural problem: dummy_invalid_return_method does not return Boolean. +Testbench Total tests run: 3 +Testbench Total tests passed: 3 +Testbench Total tests failed: 0 + +... Running MockClass_0 +Test failed: 'test_failure_0' reported failure. +Structural problem: test_failure_1 does not return Boolean. +Error: test_failure_1 has an invalid structure. +Test failed: 'test_failure_2' threw an exception: java.lang.reflect.InvocationTargetException +Test failed: 'test_failure_3' produced extraneous stdout. +Exception in thread "main" java.lang.NoClassDefFoundError: org/slf4j/LoggerFactory + at com.ReasoningTechnology.Mosaic.Mosaic_Logger.(Mosaic_Logger.java:12) + at com.ReasoningTechnology.Mosaic.Mosaic_Testbench.run_test(Mosaic_Testbench.java:74) + at com.ReasoningTechnology.Mosaic.Mosaic_Testbench.run(Mosaic_Testbench.java:95) + at MockClass_0.main(MockClass_0.java:94) +Caused by: java.lang.ClassNotFoundException: org.slf4j.LoggerFactory + at java.base/jdk.internal.loader.BuiltinClassLoader.loadClass(BuiltinClassLoader.java:641) + at java.base/jdk.internal.loader.ClassLoaders$AppClassLoader.loadClass(ClassLoaders.java:188) + at java.base/java.lang.ClassLoader.loadClass(ClassLoader.java:528) + ... 4 more + +... Running IsPrimitive +Total tests run: 15 +Total tests passed: 15 +Total tests failed: 0 + +... Running Dispatcher_0 +making map for TestClasses_0 +Mosaic_Dispatcher:: mapping methods given class_metadata object: tester.TestClasses_0 +MethodSignature_To_Handle_Map::add_class adding methods +(add_entry:: (key boolean tester.TestClasses_0.a_public_method_1()) (value MethodHandle(TestClasses_0)boolean)) +(add_entry:: (key boolean tester.TestClasses_0.a_public_static_method_7()) (value MethodHandle()boolean)) +(add_entry:: (key boolean tester.TestClasses_0.a_private_static_method_9()) (value MethodHandle()boolean)) +(add_entry:: (key boolean tester.TestClasses_0.a_private_method_2()) (value MethodHandle(TestClasses_0)boolean)) +MethodSignature_To_Handle_Map::add_class adding constructors +(add_entry:: (key void tester.TestClasses_0.()) (value MethodHandle()TestClasses_0)) +MethodSignature_To_Handle_Map::add_class adding fields + +running test: publicClass_publicMethod +Call to Mosaic_Dispatcher::dispatch for a method bound to an instance. 
+dispatch_1:: signature key:boolean tester.TestClasses_0.a_public_method_1() +passed + +running test: make_0 +Mosaic_Dispatcher:: mapping methods given class_metadata object: tester.TestClasses_1 +MethodSignature_To_Handle_Map::add_class adding methods +(add_entry:: (key int tester.TestClasses_1.get_i()) (value MethodHandle(TestClasses_1)int)) +MethodSignature_To_Handle_Map::add_class adding constructors +(add_entry:: (key void tester.TestClasses_1.(int ,int)) (value MethodHandle(int,int)TestClasses_1)) +(add_entry:: (key void tester.TestClasses_1.(int)) (value MethodHandle(int)TestClasses_1)) +(add_entry:: (key void tester.TestClasses_1.()) (value MethodHandle()TestClasses_1)) +MethodSignature_To_Handle_Map::add_class adding fields +Call to Mosaic_Dispatcher::make +dispatch_1:: signature key:void tester.TestClasses_1.() +Call to Mosaic_Dispatcher::make +dispatch_1:: signature key:void tester.TestClasses_1.(int) +Call to Mosaic_Dispatcher::make +dispatch_1:: signature key:void tester.TestClasses_1.(int ,int) +passed + +running test: publicStaticMethod_7 +Call to Mosaic_Dispatcher::dispatch for a static method. +dispatch_1:: signature key:boolean tester.TestClasses_0.a_public_static_method_7() +passed + +running test: privateStaticMethod_9 +Call to Mosaic_Dispatcher::dispatch for a static method. +dispatch_1:: signature key:boolean tester.TestClasses_0.a_private_static_method_9() +passed + +running test: defaultClass_access +Mosaic_Dispatcher:: mapping methods from class specified by string: "tester.DefaultTestClass_01" +MethodSignature_To_Handle_Map::add_class adding methods +(add_entry:: (key boolean tester.DefaultTestClass_01.a_public_method_7()) (value MethodHandle(DefaultTestClass_01)boolean)) +(add_entry:: (key boolean tester.DefaultTestClass_01.a_private_method_8()) (value MethodHandle(DefaultTestClass_01)boolean)) +MethodSignature_To_Handle_Map::add_class adding constructors +(add_entry:: (key void tester.DefaultTestClass_01.()) (value MethodHandle()DefaultTestClass_01)) +MethodSignature_To_Handle_Map::add_class adding fields +Call to Mosaic_Dispatcher::make +dispatch_1:: signature key:void tester.DefaultTestClass_01.() +Call to Mosaic_Dispatcher::dispatch for a method bound to an instance. +dispatch_1:: signature key:boolean tester.DefaultTestClass_01.a_public_method_7() +Call to Mosaic_Dispatcher::dispatch for a method bound to an instance. +dispatch_1:: signature key:boolean tester.DefaultTestClass_01.a_private_method_8() +passed + + +... Running Dispatcher_1 + +running test: publicMethod_1 +passed + +running test: privateMethod_2 +passed + +running test: nestedPublicMethod_3 +passed + +running test: nestedPrivateMethod_4 +passed + + +... Running Dispatcher_2 + +running test: publicStaticField +passed + +running test: privateStaticField +passed + +running test: publicInstanceField +passed + +running test: privateInstanceField +passed + +running test: writePublicStaticField +passed + +running test: writePrivateStaticField +passed + +running test: writePublicInstanceField +passed + +running test: writePrivateInstanceField +passed + + +... 
Running Dispatcher_3 + +running test: privateNestedClassPublicMethod +passed + +running test: privateNestedClassPrivateMethod +passed + +running test: publicNestedClassPublicMethod +passed + +running test: publicNestedClassPrivateMethod +passed + + +2024-12-20T06:10:20Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> diff --git a/tester/document/build_run_transcript_v1.0.txt b/tester/document/build_run_transcript_v1.0.txt new file mode 100644 index 0000000..615640a --- /dev/null +++ b/tester/document/build_run_transcript_v1.0.txt @@ -0,0 +1,62 @@ +This shows all tests passing. + +Tests named `test_failure_` should fail. We need to know that the `TestBench` +can fail tests, so this is part of testing the `TestBench`. + +> cd Mosaic +> source env_tester +> emacs & + +... + +2024-11-04T11:23:08Z[Mosaic_tester] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ +> clean_build_directories ++ cd /var/user_data/Thomas-developer/Mosaic/tester ++ rm -r scratchpad/Test0.class scratchpad/Test_IO.class 'scratchpad/Test_MockClass$TestSuite.class' scratchpad/Test_MockClass.class scratchpad/Test_TestBench.class scratchpad/Test_Util.class ++ rm jvm/Test_Mosaic.jar ++ rm shell/Test0 shell/Test_IO shell/Test_MockClass shell/Test_TestBench shell/Test_Util ++ set +x +clean_build_directories done. + +2024-11-04T11:23:23Z[Mosaic_tester] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ +> make +Compiling files... ++ cd /var/user_data/Thomas-developer/Mosaic/tester ++ javac -g -d scratchpad javac/Test0.java javac/Test_IO.java javac/Test_MockClass.java javac/Test_TestBench.java javac/Test_Util.java ++ jar cf jvm/Test_Mosaic.jar -C scratchpad . ++ set +x +Creating shell wrappers... +tester/tool/make done. + +2024-11-04T11:23:27Z[Mosaic_tester] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ +> run_tests +Running Test0...Test0 passed +Running Test_Util...Test_Util passed +Running Test_IO...Test_IO passed +Running Test_TestBench...Expected output: Structural problem message for dummy_invalid_return_method. +Structural problem: dummy_invalid_return_method does not return Boolean. +Test_TestBench Total tests run: 3 +Test_TestBench Total tests passed: 3 +Test_TestBench Total tests failed: 0 +Running Test_MockClass...Test failed: 'test_failure_0' reported failure. +Structural problem: test_failure_1 does not return Boolean. +Error: test_failure_1 has an invalid structure. +Test failed: 'test_failure_2' threw an exception: java.lang.reflect.InvocationTargetException +Test failed: 'test_failure_3' produced extraneous stdout. +Test failed: 'test_failure_4' produced extraneous stderr. +Total tests run: 9 +Total tests passed: 4 +Total tests failed: 5 + +2024-11-04T11:23:33Z[Mosaic_tester] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ +> clean_build_directories ++ cd /var/user_data/Thomas-developer/Mosaic/tester ++ rm -r scratchpad/Test0.class scratchpad/Test_IO.class 'scratchpad/Test_MockClass$TestSuite.class' scratchpad/Test_MockClass.class scratchpad/Test_TestBench.class scratchpad/Test_Util.class ++ rm jvm/Test_Mosaic.jar ++ rm shell/Test0 shell/Test_IO shell/test_log.txt shell/Test_MockClass shell/Test_TestBench shell/Test_Util ++ set +x +clean_build_directories done. 
diff --git a/tester/document/build_run_transcript_v1.1.txt b/tester/document/build_run_transcript_v1.1.txt new file mode 100644 index 0000000..feb0d04 --- /dev/null +++ b/tester/document/build_run_transcript_v1.1.txt @@ -0,0 +1,74 @@ +This shows all tests passing. + +Tests named `test_failure_` should fail. We need to know that the `TestBench` +can fail tests, so this is part of testing the `TestBench`. + +Staring the environment: + +2024-11-08T07:41:48Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer§ +> bash + +2024-11-08T07:41:51Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer§ +> cd Mosaic + +2024-11-08T07:41:54Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ +> . env_tester +REPO_HOME /var/user_data/Thomas-developer/Mosaic +PROJECT Mosaic +ENV tool_shared/bespoke/env +ENV tester/tool/env + +2024-11-08T07:42:04Z[Mosaic_tester] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> emacs & + +Running the tests: + +2024-11-08T09:58:40Z[Mosaic_tester] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> clean_build_directories ++ cd /var/user_data/Thomas-developer/Mosaic/tester ++ rm -r scratchpad/Test0.class scratchpad/Test_IO.class 'scratchpad/Test_MockClass_0$TestSuite.class' scratchpad/Test_MockClass_0.class scratchpad/Test_Testbench.class scratchpad/Test_Util.class ++ rm jvm/Test_Mosaic.jar ++ rm shell/Test0 shell/Test_IO shell/test_log.txt shell/Test_MockClass_0 shell/Test_Testbench shell/Test_Util ++ set +x +clean_build_directories done. + +2024-11-08T09:58:46Z[Mosaic_tester] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> make +Compiling files... ++ cd /var/user_data/Thomas-developer/Mosaic/tester ++ javac -g -d scratchpad javac/Test0.java javac/Test_IO.java javac/Test_MockClass_0.java javac/Test_Testbench.java javac/Test_Util.java ++ jar cf jvm/Test_Mosaic.jar -C scratchpad . ++ set +x +Creating shell wrappers... +tester/tool/make done. + +2024-11-08T09:58:50Z[Mosaic_tester] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> run_tests +Running Test0...Test0 passed +Running Test_Util...Test_Util passed +Running Test_IO...Test_IO passed +Running Test_Testbench...Expected output: Structural problem message for dummy_invalid_return_method. +Structural problem: dummy_invalid_return_method does not return Boolean. +Test_Testbench Total tests run: 3 +Test_Testbench Total tests passed: 3 +Test_Testbench Total tests failed: 0 +Running Test_MockClass_0...Test failed: 'test_failure_0' reported failure. +Structural problem: test_failure_1 does not return Boolean. +Error: test_failure_1 has an invalid structure. +Test failed: 'test_failure_2' threw an exception: java.lang.reflect.InvocationTargetException +Test failed: 'test_failure_3' produced extraneous stdout. +Test failed: 'test_failure_4' produced extraneous stderr. 
+Total tests run: 9 +Total tests passed: 4 +Total tests failed: 5 + +2024-11-08T09:58:55Z[Mosaic_tester] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> diff --git a/tester/document/build_run_transctipt_v1_2024-12-13.txt b/tester/document/build_run_transctipt_v1_2024-12-13.txt new file mode 100644 index 0000000..829315a --- /dev/null +++ b/tester/document/build_run_transctipt_v1_2024-12-13.txt @@ -0,0 +1,69 @@ +2024-12-13T02:40:40Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> run_tests +test_list: Test_Logger Test0 Test_Util Test_IO Test_Testbench Test_MockClass_0 Test_Util_proxy + +... Running Test_Logger +Test passed: 'smoke_test_logging' + +... Running Test0 +Test0 passed + +... Running Test_Util +Test_Util passed + +... Running Test_IO +Test_IO passed + +... Running Test_Testbench +Expected output: Structural problem message for dummy_invalid_return_method. +Structural problem: dummy_invalid_return_method does not return Boolean. +Test_Testbench Total tests run: 3 +Test_Testbench Total tests passed: 3 +Test_Testbench Total tests failed: 0 + +... Running Test_MockClass_0 +Test failed: 'test_failure_0' reported failure. +Structural problem: test_failure_1 does not return Boolean. +Error: test_failure_1 has an invalid structure. +Test failed: 'test_failure_2' threw an exception: java.lang.reflect.InvocationTargetException +Test failed: 'test_failure_3' produced extraneous stdout. +Test failed: 'test_failure_4' produced extraneous stderr. +Total tests run: 9 +Total tests passed: 4 +Total tests failed: 5 + +... Running Test_Util_proxy +Total tests run: 3 +Total tests passed: 3 +Total tests failed: 0 + +2024-12-13T02:40:46Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> cat log/log.txt +2024-12-13T02:40:45.582Z [main] INFO c.R.Mosaic.Mosaic_Logger - +2024-12-13T02:40:45.580717856Z ----------------------------------------------------------- +Test: smoke_test_logging +Message: +This is a smoke test for logging. + +2024-12-13T02:40:45.962Z [main] INFO c.R.Mosaic.Mosaic_Logger - +2024-12-13T02:40:45.961627900Z ----------------------------------------------------------- +Test: test_failure_3 +Stream: stdout +Output: +Intentional extraneous chars to stdout for testing + + +2024-12-13T02:40:45.963Z [main] INFO c.R.Mosaic.Mosaic_Logger - +2024-12-13T02:40:45.963744794Z ----------------------------------------------------------- +Test: test_failure_4 +Stream: stderr +Output: +Intentional extraneous chars to stderr for testing. + + + +2024-12-13T02:40:52Z[] +Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ +> diff --git a/tester/document/notes.txt b/tester/document/notes.txt new file mode 100644 index 0000000..330d289 --- /dev/null +++ b/tester/document/notes.txt @@ -0,0 +1,16 @@ + +(setq gud-jdb-use-separate-io-buffer t) + +(defun jdbx () + "Run jdb with a separate input/output buffer." 
+ (interactive) + (let ((sourcepath (getenv "SOURCEPATH")) + (class-name (read-string "Enter class to debug: " "Test_Util"))) + ;; Create a separate buffer for I/O + (let ((io-buffer (get-buffer-create "*gud-jdb-io*"))) + (with-current-buffer io-buffer + (comint-mode)) + ;; Run jdb + (jdb (concat "jdb " (if sourcepath (concat "-sourcepath " sourcepath " ") "") class-name))) + ;; Switch to the I/O buffer + (pop-to-buffer "*gud-jdb-io*"))) diff --git "a/tester/document\360\237\226\211/#build_run_transcript_v1.1.txt#" "b/tester/document\360\237\226\211/#build_run_transcript_v1.1.txt#" deleted file mode 100644 index 6bfa707..0000000 --- "a/tester/document\360\237\226\211/#build_run_transcript_v1.1.txt#" +++ /dev/null @@ -1,75 +0,0 @@ -This shows all tests passing. - -Tests named `test_failure_` should fail. We need to know that the `TestBench` -can fail tests, so this is part of testing the `TestBench`. - -Staring the environment: - -2024-11-08T07:41:48Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer§ -> bash - -2024-11-08T07:41:51Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer§ -> cd Mosaic - -2024-11-08T07:41:54Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ -> . env_tester -REPO_HOME /var/user_data/Thomas-developer/Mosaic -PROJECT Mosaic -ENV tool_shared/bespoke/env -ENV tester/tool/env - -2024-11-08T07:42:04Z[Mosaic_tester] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ -> emacs & - -Running the tests: - -2024-11-08T09:58:40Z[Mosaic_tester] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ -> clean_build_directories -+ cd /var/user_data/Thomas-developer/Mosaic/tester -+ rm -r scratchpad/Test0.class scratchpad/Test_IO.class 'scratchpad/Test_MockClass_0$TestSuite.class' scratchpad/Test_MockClass_0.class scratchpad/Test_Testbench.class scratchpad/Test_Util.class -+ rm jvm/Test_Mosaic.jar -+ rm shell/Test0 shell/Test_IO shell/test_log.txt shell/Test_MockClass_0 shell/Test_Testbench shell/Test_Util -+ set +x -clean_build_directories done. - -2024-11-08T09:58:46Z[Mosaic_tester] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ -> make -Compiling files... -+ cd /var/user_data/Thomas-developer/Mosaic/tester -+ javac -g -d scratchpad javac/Test0.java javac/Test_IO.java javac/Test_MockClass_0.java javac/Test_Testbench.java javac/Test_Util.java -+ jar cf jvm/Test_Mosaic.jar -C scratchpad . -+ set +x -Creating shell wrappers... -tester/tool/make done. - -2024-11-08T09:58:50Z[Mosaic_tester] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ -> run_tests -Running Test0...Test0 passed -Running Test_Util...Test_Util passed -Running Test_IO...Test_IO passed -Running Test_Testbench...Expected output: Structural problem message for dummy_invalid_return_method. -Structural problem: dummy_invalid_return_method does not return Boolean. -Test_Testbench Total tests run: 3 -Test_Testbench Total tests passed: 3 -Test_Testbench Total tests failed: 0 - -Running Test_MockClass_0...Test failed: 'test_failure_0' reported failure. -Structural problem: test_failure_1 does not return Boolean. -Error: test_failure_1 has an invalid structure. -Test failed: 'test_failure_2' threw an exception: java.lang.reflect.InvocationTargetException -Test failed: 'test_failure_3' produced extraneous stdout. -Test failed: 'test_failure_4' produced extraneous stderr. 
-Total tests run: 9
-Total tests passed: 4
-Total tests failed: 5
-
-2024-11-08T09:58:55Z[Mosaic_tester]
-Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§
->
diff --git "a/tester/document\360\237\226\211/Writing a test.txt" "b/tester/document\360\237\226\211/Writing a test.txt"
deleted file mode 100644
index d2d02ed..0000000
--- "a/tester/document\360\237\226\211/Writing a test.txt"
+++ /dev/null
@@ -1,50 +0,0 @@
-
-I did not use Mosaic to test itself, although Test_MockClass_0 comes close.
-
-Test_MockClass_0 has the general form of a test that uses Mosaic, though
-MockClass itself does not exist. The general form:
-
-* For block testing there is conventionally a correspondence between the
-  test classes and the class being tested, so each test class will be named:
-
-    `Test_<class_name>_<number>`
-
-  Typically the lowest `<number>` will be zero, and it will correspond to
-  smoke tests.
-
-* A `Test_<class_name>_<number>` class will have inside of it another class
-  called `TestSuite`. By convention each method in this class is a test
-  routine. For block testing a test routine will have a correspondence to
-  the method being tested, and has the name:
-
-    `test_<method_name>_<number>`
-
-  This convention is not always followed; note that in `Test_MockClass_0`
-  the tests are named after the expected results rather than a method being
-  tested.
-
-  Test routines can run a number of tests on a RUT, each of which is
-  referred to as a test case. So we have this hierarchy:
-
-    `Test_<class_name>_<number>` > `TestSuite` > test_routine > test_case
-
-* The main method of a Test class will parse arguments and options, set up
-  the testing environment, make a `TestSuite` object, pass said object to
-  the `TestBench`, then take the return value from the `TestBench` and set
-  the return value of the test.
-
-* A test routine will return `true` if the test passes. Any other return
-  value, any uncaught exception, or anything left on stdout or stderr will
-  cause the test to be interpreted as a failure. (On the todo list is an
-  item to make unused stdin an error as well.)
-
-* A test routine (and the test cases it contains) should not itself print
-  any messages. It has always been this way, even before the TestBench
-  redirected the I/O streams. Rather, the test should simply return true
-  for a pass. This is because in testing we are looking for function
-  failures, not for faults. The fault will be searched for later in the
-  debugger.
-
-  If a test routine has an internal error, such that the routine itself has
-  a problem (not the RUT it is testing), this can be put in a log entry.
-  See the Mosaic_Util for the log methods.
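
To make the general form concrete, here is a minimal sketch of such a test
class. It is only an illustration: it follows the conventions above and the
Mosaic_IO / Mosaic_Testbench API used by the javac files in this commit; the
class Widget and the routine test_frob_0 are hypothetical and do not exist in
the repo.

    import com.ReasoningTechnology.Mosaic.Mosaic_IO;
    import com.ReasoningTechnology.Mosaic.Mosaic_Testbench;

    // Hypothetical block test for a class `Widget` (no such class exists here).
    public class Test_Widget_0{

      public class TestSuite{

        // A test routine: runs its test cases against the RUT, prints nothing,
        // and returns true only when every case passes.
        public Boolean test_frob_0(Mosaic_IO io){
          Boolean case_0 = true; // a real case would exercise Widget.frob() here
          return case_0;
        }
      }

      public static void main(String[] args){
        TestSuite suite = new Test_Widget_0().new TestSuite();
        int result = Mosaic_Testbench.run(suite); // testbench summarizes pass/fail
        System.exit(result); // exit status carries the result to shell wrappers
      }
    }
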
diff --git "a/tester/document\360\237\226\211/build_run_transcript_2024-12-20T06:09:38Z.txt" "b/tester/document\360\237\226\211/build_run_transcript_2024-12-20T06:09:38Z.txt" deleted file mode 100644 index 689546b..0000000 --- "a/tester/document\360\237\226\211/build_run_transcript_2024-12-20T06:09:38Z.txt" +++ /dev/null @@ -1,220 +0,0 @@ -2024-12-20T06:09:38Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ -> clean; make; run -+ cd /var/user_data/Thomas-developer/Mosaic/tester -+ rm_na log/log.txt -rm_na: cannot remove 'log/log.txt': No such file or directory -+ rm_na -r scratchpad/Dispatcher_0.class scratchpad/Dispatcher_1.class scratchpad/Dispatcher_2.class scratchpad/Dispatcher_3.class scratchpad/IO.class 'scratchpad/IsPrimitive$TestSuite$1TestEnum.class' 'scratchpad/IsPrimitive$TestSuite.class' scratchpad/IsPrimitive.class 'scratchpad/Logger$TestSuite.class' scratchpad/Logger.class 'scratchpad/MockClass_0$TestSuite.class' scratchpad/MockClass_0.class scratchpad/smoke.class scratchpad/Testbench.class scratchpad/tester scratchpad/Util.class -+ rm_na jvm/Dispatcher_0 jvm/Dispatcher_1 jvm/Dispatcher_2 jvm/Dispatcher_3 jvm/IO jvm/IsPrimitive jvm/Logger jvm/MockClass_0 jvm/smoke jvm/Testbench jvm/Util -+ rm_na jdwp_server/Dispatcher_0 jdwp_server/Dispatcher_1 jdwp_server/Dispatcher_2 jdwp_server/Dispatcher_3 jdwp_server/IO jdwp_server/IsPrimitive jdwp_server/Logger jdwp_server/MockClass_0 jdwp_server/smoke jdwp_server/Testbench jdwp_server/Util -+ set +x -clean done. -++ realpath /var/user_data/Thomas-developer/Mosaic/tester/tool🖉/make -+ script_afp=/var/user_data/Thomas-developer/Mosaic/tester/tool🖉/make -+ env_must_be=tester/tool🖉/env -+ '[' tester/tool🖉/env '!=' tester/tool🖉/env ']' -+ echo 'Compiling files...' -Compiling files... -+ set -x -+ cd /var/user_data/Thomas-developer/Mosaic/tester -+ javac -g -d scratchpad javac🖉/TestClasses_0.java javac🖉/TestClasses_1.java javac🖉/TestClasses_2.java -++ list -+ list='smoke Logger Util IO Testbench MockClass_0 IsPrimitive Dispatcher_0 Dispatcher_1 Dispatcher_2 Dispatcher_3' -+ for file in $list -+ javac -g -d scratchpad javac🖉/smoke.java -+ for file in $list -+ javac -g -d scratchpad javac🖉/Logger.java -+ for file in $list -+ javac -g -d scratchpad javac🖉/Util.java -+ for file in $list -+ javac -g -d scratchpad javac🖉/IO.java -+ for file in $list -+ javac -g -d scratchpad javac🖉/Testbench.java -+ for file in $list -+ javac -g -d scratchpad javac🖉/MockClass_0.java -+ for file in $list -+ javac -g -d scratchpad javac🖉/IsPrimitive.java -+ for file in $list -+ javac -g -d scratchpad javac🖉/Dispatcher_0.java -+ for file in $list -+ javac -g -d scratchpad javac🖉/Dispatcher_1.java -+ for file in $list -+ javac -g -d scratchpad javac🖉/Dispatcher_2.java -+ for file in $list -+ javac -g -d scratchpad javac🖉/Dispatcher_3.java -+ set +x -Making jvm scripts ... -Making jdwp debug server scripts... -tester/tool🖉/make done. -list: smoke Logger Util IO Testbench MockClass_0 IsPrimitive Dispatcher_0 Dispatcher_1 Dispatcher_2 Dispatcher_3 - -... Running smoke -Test0 passed - -... 
Running Logger -Exception in thread "main" java.lang.NoClassDefFoundError: org/slf4j/LoggerFactory - at com.ReasoningTechnology.Mosaic.Mosaic_Logger.(Mosaic_Logger.java:12) - at Logger$TestSuite.smoke_test_logging(Logger.java:9) - at Logger.main(Logger.java:21) -Caused by: java.lang.ClassNotFoundException: org.slf4j.LoggerFactory - at java.base/jdk.internal.loader.BuiltinClassLoader.loadClass(BuiltinClassLoader.java:641) - at java.base/jdk.internal.loader.ClassLoaders$AppClassLoader.loadClass(ClassLoaders.java:188) - at java.base/java.lang.ClassLoader.loadClass(ClassLoader.java:528) - ... 3 more - -... Running Util -Util passed - -... Running IO -IO passed - -... Running Testbench -Expected output: Structural problem message for dummy_invalid_return_method. -Structural problem: dummy_invalid_return_method does not return Boolean. -Testbench Total tests run: 3 -Testbench Total tests passed: 3 -Testbench Total tests failed: 0 - -... Running MockClass_0 -Test failed: 'test_failure_0' reported failure. -Structural problem: test_failure_1 does not return Boolean. -Error: test_failure_1 has an invalid structure. -Test failed: 'test_failure_2' threw an exception: java.lang.reflect.InvocationTargetException -Test failed: 'test_failure_3' produced extraneous stdout. -Exception in thread "main" java.lang.NoClassDefFoundError: org/slf4j/LoggerFactory - at com.ReasoningTechnology.Mosaic.Mosaic_Logger.(Mosaic_Logger.java:12) - at com.ReasoningTechnology.Mosaic.Mosaic_Testbench.run_test(Mosaic_Testbench.java:74) - at com.ReasoningTechnology.Mosaic.Mosaic_Testbench.run(Mosaic_Testbench.java:95) - at MockClass_0.main(MockClass_0.java:94) -Caused by: java.lang.ClassNotFoundException: org.slf4j.LoggerFactory - at java.base/jdk.internal.loader.BuiltinClassLoader.loadClass(BuiltinClassLoader.java:641) - at java.base/jdk.internal.loader.ClassLoaders$AppClassLoader.loadClass(ClassLoaders.java:188) - at java.base/java.lang.ClassLoader.loadClass(ClassLoader.java:528) - ... 4 more - -... Running IsPrimitive -Total tests run: 15 -Total tests passed: 15 -Total tests failed: 0 - -... Running Dispatcher_0 -making map for TestClasses_0 -Mosaic_Dispatcher:: mapping methods given class_metadata object: tester.TestClasses_0 -MethodSignature_To_Handle_Map::add_class adding methods -(add_entry:: (key boolean tester.TestClasses_0.a_public_method_1()) (value MethodHandle(TestClasses_0)boolean)) -(add_entry:: (key boolean tester.TestClasses_0.a_public_static_method_7()) (value MethodHandle()boolean)) -(add_entry:: (key boolean tester.TestClasses_0.a_private_static_method_9()) (value MethodHandle()boolean)) -(add_entry:: (key boolean tester.TestClasses_0.a_private_method_2()) (value MethodHandle(TestClasses_0)boolean)) -MethodSignature_To_Handle_Map::add_class adding constructors -(add_entry:: (key void tester.TestClasses_0.()) (value MethodHandle()TestClasses_0)) -MethodSignature_To_Handle_Map::add_class adding fields - -running test: publicClass_publicMethod -Call to Mosaic_Dispatcher::dispatch for a method bound to an instance. 
-dispatch_1:: signature key:boolean tester.TestClasses_0.a_public_method_1() -passed - -running test: make_0 -Mosaic_Dispatcher:: mapping methods given class_metadata object: tester.TestClasses_1 -MethodSignature_To_Handle_Map::add_class adding methods -(add_entry:: (key int tester.TestClasses_1.get_i()) (value MethodHandle(TestClasses_1)int)) -MethodSignature_To_Handle_Map::add_class adding constructors -(add_entry:: (key void tester.TestClasses_1.(int ,int)) (value MethodHandle(int,int)TestClasses_1)) -(add_entry:: (key void tester.TestClasses_1.(int)) (value MethodHandle(int)TestClasses_1)) -(add_entry:: (key void tester.TestClasses_1.()) (value MethodHandle()TestClasses_1)) -MethodSignature_To_Handle_Map::add_class adding fields -Call to Mosaic_Dispatcher::make -dispatch_1:: signature key:void tester.TestClasses_1.() -Call to Mosaic_Dispatcher::make -dispatch_1:: signature key:void tester.TestClasses_1.(int) -Call to Mosaic_Dispatcher::make -dispatch_1:: signature key:void tester.TestClasses_1.(int ,int) -passed - -running test: publicStaticMethod_7 -Call to Mosaic_Dispatcher::dispatch for a static method. -dispatch_1:: signature key:boolean tester.TestClasses_0.a_public_static_method_7() -passed - -running test: privateStaticMethod_9 -Call to Mosaic_Dispatcher::dispatch for a static method. -dispatch_1:: signature key:boolean tester.TestClasses_0.a_private_static_method_9() -passed - -running test: defaultClass_access -Mosaic_Dispatcher:: mapping methods from class specified by string: "tester.DefaultTestClass_01" -MethodSignature_To_Handle_Map::add_class adding methods -(add_entry:: (key boolean tester.DefaultTestClass_01.a_public_method_7()) (value MethodHandle(DefaultTestClass_01)boolean)) -(add_entry:: (key boolean tester.DefaultTestClass_01.a_private_method_8()) (value MethodHandle(DefaultTestClass_01)boolean)) -MethodSignature_To_Handle_Map::add_class adding constructors -(add_entry:: (key void tester.DefaultTestClass_01.()) (value MethodHandle()DefaultTestClass_01)) -MethodSignature_To_Handle_Map::add_class adding fields -Call to Mosaic_Dispatcher::make -dispatch_1:: signature key:void tester.DefaultTestClass_01.() -Call to Mosaic_Dispatcher::dispatch for a method bound to an instance. -dispatch_1:: signature key:boolean tester.DefaultTestClass_01.a_public_method_7() -Call to Mosaic_Dispatcher::dispatch for a method bound to an instance. -dispatch_1:: signature key:boolean tester.DefaultTestClass_01.a_private_method_8() -passed - - -... Running Dispatcher_1 - -running test: publicMethod_1 -passed - -running test: privateMethod_2 -passed - -running test: nestedPublicMethod_3 -passed - -running test: nestedPrivateMethod_4 -passed - - -... Running Dispatcher_2 - -running test: publicStaticField -passed - -running test: privateStaticField -passed - -running test: publicInstanceField -passed - -running test: privateInstanceField -passed - -running test: writePublicStaticField -passed - -running test: writePrivateStaticField -passed - -running test: writePublicInstanceField -passed - -running test: writePrivateInstanceField -passed - - -... 
Running Dispatcher_3 - -running test: privateNestedClassPublicMethod -passed - -running test: privateNestedClassPrivateMethod -passed - -running test: publicNestedClassPublicMethod -passed - -running test: publicNestedClassPrivateMethod -passed - - -2024-12-20T06:10:20Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ -> diff --git "a/tester/document\360\237\226\211/build_run_transcript_v1.0.txt" "b/tester/document\360\237\226\211/build_run_transcript_v1.0.txt" deleted file mode 100644 index 615640a..0000000 --- "a/tester/document\360\237\226\211/build_run_transcript_v1.0.txt" +++ /dev/null @@ -1,62 +0,0 @@ -This shows all tests passing. - -Tests named `test_failure_` should fail. We need to know that the `TestBench` -can fail tests, so this is part of testing the `TestBench`. - -> cd Mosaic -> source env_tester -> emacs & - -... - -2024-11-04T11:23:08Z[Mosaic_tester] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ -> clean_build_directories -+ cd /var/user_data/Thomas-developer/Mosaic/tester -+ rm -r scratchpad/Test0.class scratchpad/Test_IO.class 'scratchpad/Test_MockClass$TestSuite.class' scratchpad/Test_MockClass.class scratchpad/Test_TestBench.class scratchpad/Test_Util.class -+ rm jvm/Test_Mosaic.jar -+ rm shell/Test0 shell/Test_IO shell/Test_MockClass shell/Test_TestBench shell/Test_Util -+ set +x -clean_build_directories done. - -2024-11-04T11:23:23Z[Mosaic_tester] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ -> make -Compiling files... -+ cd /var/user_data/Thomas-developer/Mosaic/tester -+ javac -g -d scratchpad javac/Test0.java javac/Test_IO.java javac/Test_MockClass.java javac/Test_TestBench.java javac/Test_Util.java -+ jar cf jvm/Test_Mosaic.jar -C scratchpad . -+ set +x -Creating shell wrappers... -tester/tool/make done. - -2024-11-04T11:23:27Z[Mosaic_tester] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ -> run_tests -Running Test0...Test0 passed -Running Test_Util...Test_Util passed -Running Test_IO...Test_IO passed -Running Test_TestBench...Expected output: Structural problem message for dummy_invalid_return_method. -Structural problem: dummy_invalid_return_method does not return Boolean. -Test_TestBench Total tests run: 3 -Test_TestBench Total tests passed: 3 -Test_TestBench Total tests failed: 0 -Running Test_MockClass...Test failed: 'test_failure_0' reported failure. -Structural problem: test_failure_1 does not return Boolean. -Error: test_failure_1 has an invalid structure. -Test failed: 'test_failure_2' threw an exception: java.lang.reflect.InvocationTargetException -Test failed: 'test_failure_3' produced extraneous stdout. -Test failed: 'test_failure_4' produced extraneous stderr. -Total tests run: 9 -Total tests passed: 4 -Total tests failed: 5 - -2024-11-04T11:23:33Z[Mosaic_tester] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ -> clean_build_directories -+ cd /var/user_data/Thomas-developer/Mosaic/tester -+ rm -r scratchpad/Test0.class scratchpad/Test_IO.class 'scratchpad/Test_MockClass$TestSuite.class' scratchpad/Test_MockClass.class scratchpad/Test_TestBench.class scratchpad/Test_Util.class -+ rm jvm/Test_Mosaic.jar -+ rm shell/Test0 shell/Test_IO shell/test_log.txt shell/Test_MockClass shell/Test_TestBench shell/Test_Util -+ set +x -clean_build_directories done. 
diff --git "a/tester/document\360\237\226\211/build_run_transcript_v1.1.txt" "b/tester/document\360\237\226\211/build_run_transcript_v1.1.txt" deleted file mode 100644 index feb0d04..0000000 --- "a/tester/document\360\237\226\211/build_run_transcript_v1.1.txt" +++ /dev/null @@ -1,74 +0,0 @@ -This shows all tests passing. - -Tests named `test_failure_` should fail. We need to know that the `TestBench` -can fail tests, so this is part of testing the `TestBench`. - -Staring the environment: - -2024-11-08T07:41:48Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer§ -> bash - -2024-11-08T07:41:51Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer§ -> cd Mosaic - -2024-11-08T07:41:54Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic§ -> . env_tester -REPO_HOME /var/user_data/Thomas-developer/Mosaic -PROJECT Mosaic -ENV tool_shared/bespoke/env -ENV tester/tool/env - -2024-11-08T07:42:04Z[Mosaic_tester] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ -> emacs & - -Running the tests: - -2024-11-08T09:58:40Z[Mosaic_tester] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ -> clean_build_directories -+ cd /var/user_data/Thomas-developer/Mosaic/tester -+ rm -r scratchpad/Test0.class scratchpad/Test_IO.class 'scratchpad/Test_MockClass_0$TestSuite.class' scratchpad/Test_MockClass_0.class scratchpad/Test_Testbench.class scratchpad/Test_Util.class -+ rm jvm/Test_Mosaic.jar -+ rm shell/Test0 shell/Test_IO shell/test_log.txt shell/Test_MockClass_0 shell/Test_Testbench shell/Test_Util -+ set +x -clean_build_directories done. - -2024-11-08T09:58:46Z[Mosaic_tester] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ -> make -Compiling files... -+ cd /var/user_data/Thomas-developer/Mosaic/tester -+ javac -g -d scratchpad javac/Test0.java javac/Test_IO.java javac/Test_MockClass_0.java javac/Test_Testbench.java javac/Test_Util.java -+ jar cf jvm/Test_Mosaic.jar -C scratchpad . -+ set +x -Creating shell wrappers... -tester/tool/make done. - -2024-11-08T09:58:50Z[Mosaic_tester] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ -> run_tests -Running Test0...Test0 passed -Running Test_Util...Test_Util passed -Running Test_IO...Test_IO passed -Running Test_Testbench...Expected output: Structural problem message for dummy_invalid_return_method. -Structural problem: dummy_invalid_return_method does not return Boolean. -Test_Testbench Total tests run: 3 -Test_Testbench Total tests passed: 3 -Test_Testbench Total tests failed: 0 -Running Test_MockClass_0...Test failed: 'test_failure_0' reported failure. -Structural problem: test_failure_1 does not return Boolean. -Error: test_failure_1 has an invalid structure. -Test failed: 'test_failure_2' threw an exception: java.lang.reflect.InvocationTargetException -Test failed: 'test_failure_3' produced extraneous stdout. -Test failed: 'test_failure_4' produced extraneous stderr. 
-Total tests run: 9 -Total tests passed: 4 -Total tests failed: 5 - -2024-11-08T09:58:55Z[Mosaic_tester] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ -> diff --git "a/tester/document\360\237\226\211/build_run_transctipt_v1_2024-12-13.txt" "b/tester/document\360\237\226\211/build_run_transctipt_v1_2024-12-13.txt" deleted file mode 100644 index 829315a..0000000 --- "a/tester/document\360\237\226\211/build_run_transctipt_v1_2024-12-13.txt" +++ /dev/null @@ -1,69 +0,0 @@ -2024-12-13T02:40:40Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ -> run_tests -test_list: Test_Logger Test0 Test_Util Test_IO Test_Testbench Test_MockClass_0 Test_Util_proxy - -... Running Test_Logger -Test passed: 'smoke_test_logging' - -... Running Test0 -Test0 passed - -... Running Test_Util -Test_Util passed - -... Running Test_IO -Test_IO passed - -... Running Test_Testbench -Expected output: Structural problem message for dummy_invalid_return_method. -Structural problem: dummy_invalid_return_method does not return Boolean. -Test_Testbench Total tests run: 3 -Test_Testbench Total tests passed: 3 -Test_Testbench Total tests failed: 0 - -... Running Test_MockClass_0 -Test failed: 'test_failure_0' reported failure. -Structural problem: test_failure_1 does not return Boolean. -Error: test_failure_1 has an invalid structure. -Test failed: 'test_failure_2' threw an exception: java.lang.reflect.InvocationTargetException -Test failed: 'test_failure_3' produced extraneous stdout. -Test failed: 'test_failure_4' produced extraneous stderr. -Total tests run: 9 -Total tests passed: 4 -Total tests failed: 5 - -... Running Test_Util_proxy -Total tests run: 3 -Total tests passed: 3 -Total tests failed: 0 - -2024-12-13T02:40:46Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ -> cat log/log.txt -2024-12-13T02:40:45.582Z [main] INFO c.R.Mosaic.Mosaic_Logger - -2024-12-13T02:40:45.580717856Z ----------------------------------------------------------- -Test: smoke_test_logging -Message: -This is a smoke test for logging. - -2024-12-13T02:40:45.962Z [main] INFO c.R.Mosaic.Mosaic_Logger - -2024-12-13T02:40:45.961627900Z ----------------------------------------------------------- -Test: test_failure_3 -Stream: stdout -Output: -Intentional extraneous chars to stdout for testing - - -2024-12-13T02:40:45.963Z [main] INFO c.R.Mosaic.Mosaic_Logger - -2024-12-13T02:40:45.963744794Z ----------------------------------------------------------- -Test: test_failure_4 -Stream: stderr -Output: -Intentional extraneous chars to stderr for testing. - - - -2024-12-13T02:40:52Z[] -Thomas-developer@Blossac§/var/user_data/Thomas-developer/Mosaic/tester§ -> diff --git "a/tester/document\360\237\226\211/notes.txt" "b/tester/document\360\237\226\211/notes.txt" deleted file mode 100644 index 330d289..0000000 --- "a/tester/document\360\237\226\211/notes.txt" +++ /dev/null @@ -1,16 +0,0 @@ - -(setq gud-jdb-use-separate-io-buffer t) - -(defun jdbx () - "Run jdb with a separate input/output buffer." 
- (interactive) - (let ((sourcepath (getenv "SOURCEPATH")) - (class-name (read-string "Enter class to debug: " "Test_Util"))) - ;; Create a separate buffer for I/O - (let ((io-buffer (get-buffer-create "*gud-jdb-io*"))) - (with-current-buffer io-buffer - (comint-mode)) - ;; Run jdb - (jdb (concat "jdb " (if sourcepath (concat "-sourcepath " sourcepath " ") "") class-name))) - ;; Switch to the I/O buffer - (pop-to-buffer "*gud-jdb-io*"))) diff --git a/tester/javac/Dispatcher_0.java b/tester/javac/Dispatcher_0.java new file mode 100644 index 0000000..7f4e1f2 --- /dev/null +++ b/tester/javac/Dispatcher_0.java @@ -0,0 +1,167 @@ +import com.ReasoningTechnology.Mosaic.Mosaic_Dispatcher; +import com.ReasoningTechnology.Mosaic.Mosaic_IsPrimitive; +import com.ReasoningTechnology.Mosaic.Mosaic_Quantifier; + +import tester.TestClasses_0; +import tester.TestClasses_1; + +public class Dispatcher_0{ + + private static Mosaic_Dispatcher dispatcher; + + static{ + // Initialize the dispatcher for TestClasses_0 + Mosaic_Dispatcher.test_switch(true); + } + + public Dispatcher_0(){ + Mosaic_Dispatcher.test_print("making map for TestClasses_0"); + dispatcher = new Mosaic_Dispatcher(TestClasses_0.class); + } + + // Test method to access the public method of the public class + public static boolean test_publicClass_publicMethod(){ + Object instance = new TestClasses_0(); + boolean result = dispatcher.dispatch + ( + instance // target instance + ,boolean.class // return type + ,"a_public_method_1" // method name + ); + + return result; + } + + public static boolean test_make_0(){ + Boolean[] condition_list = new Boolean[4]; + Mosaic_Quantifier.all_set_false(condition_list); + int i = 0; + + Mosaic_Dispatcher d1 = new Mosaic_Dispatcher(TestClasses_1.class); + + TestClasses_1 tc0 = new TestClasses_1(); + condition_list[i++] = tc0.get_i() == 0; + + TestClasses_1 tc1 = (TestClasses_1) d1.make(); + condition_list[i++] = tc1.get_i() == 0; + + TestClasses_1 tc2 = (TestClasses_1) d1.make(new Mosaic_IsPrimitive(7)); + condition_list[i++] = tc2.get_i() == 7; + + TestClasses_1 tc3 = (TestClasses_1) d1.make(new Mosaic_IsPrimitive(21) ,new Mosaic_IsPrimitive(17) ); + condition_list[i++] = tc3.get_i() == 38; + + return Mosaic_Quantifier.all(condition_list); + } + + // Test public static method + public static boolean test_publicStaticMethod_7(){ + boolean result = dispatcher.dispatch( + boolean.class, // return type + "a_public_static_method_7" // method name + ); + return result; + } + + // Test private static method + public static boolean test_privateStaticMethod_9(){ + boolean result = dispatcher.dispatch( + boolean.class, // return type + "a_private_static_method_9" // method name + ); + return result; + } + + public static boolean test_defaultClass_access(){ + try{ + Mosaic_Dispatcher d2=new Mosaic_Dispatcher("tester.DefaultTestClass_01"); + Object instance=d2.make(); + boolean result1=d2.dispatch( + instance // target instance + ,boolean.class // return type + ,"a_public_method_7" // public method name + ); + + boolean result2=d2.dispatch( + instance // target instance + ,boolean.class // return type + ,"a_private_method_8" // private method name + ); + + return result1 && result2; + }catch(Throwable t){ + t.printStackTrace(); + return false; + } + } + + // Extend the run method to include static method tests + public static boolean run(){ + try{ + boolean result = true; + + System.out.println(""); + System.out.println("running test: publicClass_publicMethod"); + if 
(Boolean.TRUE.equals(test_publicClass_publicMethod())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result = false; + } + + System.out.println(""); + System.out.println("running test: make_0"); + if (Boolean.TRUE.equals(test_make_0())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result = false; + } + + System.out.println(""); + System.out.println("running test: publicStaticMethod_7"); + if (Boolean.TRUE.equals(test_publicStaticMethod_7())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result = false; + } + + System.out.println(""); + System.out.println("running test: privateStaticMethod_9"); + if (Boolean.TRUE.equals(test_privateStaticMethod_9())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result = false; + } + + System.out.println(""); + System.out.println("running test: defaultClass_access"); + if(Boolean.TRUE.equals(test_defaultClass_access())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result=false; + } + + System.out.println(""); + return result; + + }catch (Exception e){ + System.out.println("Exception in Dispatcher_0 test:"); + e.printStackTrace(); + return false; + } + } + + public static void main(String[] args){ + // Execute the run method and return its result as the exit code + new Dispatcher_0(); + if( run() ) + System.exit(0); + else + System.exit(1); + } + +} diff --git a/tester/javac/Dispatcher_1.java b/tester/javac/Dispatcher_1.java new file mode 100644 index 0000000..4758510 --- /dev/null +++ b/tester/javac/Dispatcher_1.java @@ -0,0 +1,111 @@ +import com.ReasoningTechnology.Mosaic.Mosaic_Dispatcher; +import com.ReasoningTechnology.Mosaic.Mosaic_Quantifier; + +import tester.TestClasses_0; + +public class Dispatcher_1{ + + private static Mosaic_Dispatcher dispatcher; + + static{ + dispatcher = new Mosaic_Dispatcher(TestClasses_0.class); + } + + public static boolean test_publicMethod_1(){ + TestClasses_0 instance = new TestClasses_0(); + return dispatcher.dispatch(instance, boolean.class, "a_public_method_1"); + } + + public static boolean test_privateMethod_2(){ + TestClasses_0 instance = new TestClasses_0(); + return dispatcher.dispatch(instance, boolean.class, "a_private_method_2"); + } + + public static boolean test_nestedPublicMethod_3(){ + try{ + TestClasses_0 outer = new TestClasses_0(); + TestClasses_0.APublicClass_01 nested = outer.new APublicClass_01(); + Mosaic_Dispatcher nested_dispatcher = new Mosaic_Dispatcher(TestClasses_0.APublicClass_01.class); + return nested_dispatcher.dispatch(nested, boolean.class, "a_public_method_3"); + } catch(Exception e){ + e.printStackTrace(); + return false; + } + } + + public static boolean test_nestedPrivateMethod_4(){ + try{ + TestClasses_0 outer = new TestClasses_0(); + TestClasses_0.APublicClass_01 nested = outer.new APublicClass_01(); + Mosaic_Dispatcher nested_dispatcher = new Mosaic_Dispatcher(TestClasses_0.APublicClass_01.class); + return nested_dispatcher.dispatch(nested, boolean.class, "a_private_method_4"); + } catch(Exception e){ + e.printStackTrace(); + return false; + } + } + + public static boolean run(){ + try{ + boolean result = true; + + System.out.println(""); + System.out.println("running test: publicMethod_1"); + if(Boolean.TRUE.equals(test_publicMethod_1())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result = false; + } + + System.out.println(""); + System.out.println("running test: privateMethod_2"); + 
if(Boolean.TRUE.equals(test_privateMethod_2())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result = false; + } + + System.out.println(""); + System.out.println("running test: nestedPublicMethod_3"); + if(Boolean.TRUE.equals(test_nestedPublicMethod_3())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result = false; + } + + System.out.println(""); + System.out.println("running test: nestedPrivateMethod_4"); + if(Boolean.TRUE.equals(test_nestedPrivateMethod_4())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result = false; + } + + System.out.println(""); + return result; + + }catch(Exception e){ + System.out.println("Exception in Dispatcher_1 test:"); + e.printStackTrace(); + return false; + } + } + + private static boolean logPass(){ + System.out.println("passed"); + return true; + } + + private static boolean logFail(){ + System.out.println("FAILED"); + return false; + } + + public static void main(String[] args){ + System.exit(run() ? 0 : 1); + } +} diff --git a/tester/javac/Dispatcher_2.java b/tester/javac/Dispatcher_2.java new file mode 100644 index 0000000..cb1bb97 --- /dev/null +++ b/tester/javac/Dispatcher_2.java @@ -0,0 +1,199 @@ +import com.ReasoningTechnology.Mosaic.Mosaic_Dispatcher; + +import tester.TestClasses_2; + +public class Dispatcher_2{ + + private static Mosaic_Dispatcher dispatcher; + + static{ + TestClasses_2.initialize_static_fields(); + dispatcher=new Mosaic_Dispatcher(TestClasses_2.class); + } + + public static boolean test_publicStaticField(){ + try{ + Integer value=dispatcher.read("i_200"); + return value != null && value == 200; + }catch(Throwable t){ + t.printStackTrace(); + return false; + } + } + + public static boolean test_privateStaticField(){ + try{ + String value=dispatcher.read("s_201"); + return value != null && value.equals("Static Private String"); + }catch(Throwable t){ + t.printStackTrace(); + return false; + } + } + + public static boolean test_publicInstanceField(){ + try{ + TestClasses_2 instance=dispatcher.make(); + instance.initialize_instance_fields(); + Integer value=dispatcher.read(instance,"i_202"); + return value != null && value == 202; + }catch(Throwable t){ + t.printStackTrace(); + return false; + } + } + + public static boolean test_privateInstanceField(){ + try{ + TestClasses_2 instance=dispatcher.make(); + instance.initialize_instance_fields(); + Integer value=dispatcher.read(instance,"i_203"); + return value != null && value == 203; + }catch(Throwable t){ + t.printStackTrace(); + return false; + } + } + + public static boolean test_writePublicStaticField(){ + try{ + dispatcher.write("i_200",300); + Integer value=dispatcher.read("i_200"); + return value != null && value == 300; + }catch(Throwable t){ + t.printStackTrace(); + return false; + } + } + + public static boolean test_writePrivateStaticField(){ + try{ + dispatcher.write("s_201","New Static Private String"); + String value=dispatcher.read("s_201"); + return value != null && value.equals("New Static Private String"); + }catch(Throwable t){ + t.printStackTrace(); + return false; + } + } + + public static boolean test_writePublicInstanceField(){ + try{ + TestClasses_2 instance=dispatcher.make(); + dispatcher.write(instance,"i_202",400); + Integer value=dispatcher.read(instance,"i_202"); + return value != null && value == 400; + }catch(Throwable t){ + t.printStackTrace(); + return false; + } + } + + public static boolean test_writePrivateInstanceField(){ + try{ + TestClasses_2 
instance=dispatcher.make(); + dispatcher.write(instance,"i_203",500); + Integer value=dispatcher.read(instance,"i_203"); + return value != null && value == 500; + }catch(Throwable t){ + t.printStackTrace(); + return false; + } + } + + public static boolean run(){ + try{ + boolean result=true; + + // Existing read tests + System.out.println(""); + System.out.println("running test: publicStaticField"); + if(Boolean.TRUE.equals(test_publicStaticField())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result=false; + } + + System.out.println(""); + System.out.println("running test: privateStaticField"); + if(Boolean.TRUE.equals(test_privateStaticField())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result=false; + } + + System.out.println(""); + System.out.println("running test: publicInstanceField"); + if(Boolean.TRUE.equals(test_publicInstanceField())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result=false; + } + + System.out.println(""); + System.out.println("running test: privateInstanceField"); + if(Boolean.TRUE.equals(test_privateInstanceField())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result=false; + } + + // New write tests + System.out.println(""); + System.out.println("running test: writePublicStaticField"); + if(Boolean.TRUE.equals(test_writePublicStaticField())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result=false; + } + + System.out.println(""); + System.out.println("running test: writePrivateStaticField"); + if(Boolean.TRUE.equals(test_writePrivateStaticField())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result=false; + } + + System.out.println(""); + System.out.println("running test: writePublicInstanceField"); + if(Boolean.TRUE.equals(test_writePublicInstanceField())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result=false; + } + + System.out.println(""); + System.out.println("running test: writePrivateInstanceField"); + if(Boolean.TRUE.equals(test_writePrivateInstanceField())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result=false; + } + + System.out.println(""); + return result; + + }catch(Exception e){ + System.out.println("Exception in Dispatcher_2 test:"); + e.printStackTrace(); + return false; + } + } + + public static void main(String[] args){ + if(run()){ + System.exit(0); + }else{ + System.exit(1); + } + } +} diff --git a/tester/javac/Dispatcher_3.java b/tester/javac/Dispatcher_3.java new file mode 100644 index 0000000..e79a1af --- /dev/null +++ b/tester/javac/Dispatcher_3.java @@ -0,0 +1,132 @@ +import com.ReasoningTechnology.Mosaic.Mosaic_Dispatcher; + +import tester.TestClasses_0; + +public class Dispatcher_3{ + + public static boolean test_privateNestedClassPublicMethod(){ + try{ + TestClasses_0 outer_instance = new TestClasses_0(); + Mosaic_Dispatcher nested_dispatcher = new Mosaic_Dispatcher("tester.TestClasses_0$APrivateClass_02"); + Object nested_instance = nested_dispatcher.make(new Object[]{outer_instance}); + boolean result = nested_dispatcher.dispatch( + nested_instance + ,boolean.class + ,"a_public_method_5" + ); + return result; + }catch(Throwable t){ + System.out.println("Exception in test_privateNestedClassPublicMethod:"); + t.printStackTrace(); + return false; + } + } + + public static boolean test_privateNestedClassPrivateMethod(){ + try{ + TestClasses_0 outer_instance = new 
TestClasses_0(); + Mosaic_Dispatcher nested_dispatcher = new Mosaic_Dispatcher("tester.TestClasses_0$APrivateClass_02"); + Object nested_instance = nested_dispatcher.make(new Object[]{outer_instance}); + boolean result = nested_dispatcher.dispatch( + nested_instance + ,boolean.class + ,"a_private_method_6" + ); + return result; + }catch(Throwable t){ + System.out.println("Exception in test_privateNestedClassPrivateMethod:"); + t.printStackTrace(); + return false; + } + } + + public static boolean test_publicNestedClassPublicMethod(){ + try{ + TestClasses_0 outer = new TestClasses_0(); + TestClasses_0.APublicClass_01 nested_instance = outer.new APublicClass_01(); + Mosaic_Dispatcher nested_dispatcher = new Mosaic_Dispatcher(TestClasses_0.APublicClass_01.class); + boolean result = nested_dispatcher.dispatch( + nested_instance + ,boolean.class + ,"a_public_method_3" + ); + return result; + }catch(Throwable t){ + System.out.println("Exception in test_publicNestedClassPublicMethod:"); + t.printStackTrace(); + return false; + } + } + + public static boolean test_publicNestedClassPrivateMethod(){ + try{ + TestClasses_0 outer = new TestClasses_0(); + TestClasses_0.APublicClass_01 nested_instance = outer.new APublicClass_01(); + Mosaic_Dispatcher nested_dispatcher = new Mosaic_Dispatcher(TestClasses_0.APublicClass_01.class); + boolean result = nested_dispatcher.dispatch( + nested_instance + ,boolean.class + ,"a_private_method_4" + ); + return result; + }catch(Throwable t){ + System.out.println("Exception in test_publicNestedClassPrivateMethod:"); + t.printStackTrace(); + return false; + } + } + + public static boolean run(){ + try{ + boolean result = true; + + System.out.println(""); + System.out.println("running test: privateNestedClassPublicMethod"); + if(Boolean.TRUE.equals(test_privateNestedClassPublicMethod())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result = false; + } + + System.out.println(""); + System.out.println("running test: privateNestedClassPrivateMethod"); + if(Boolean.TRUE.equals(test_privateNestedClassPrivateMethod())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result = false; + } + + System.out.println(""); + System.out.println("running test: publicNestedClassPublicMethod"); + if(Boolean.TRUE.equals(test_publicNestedClassPublicMethod())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result = false; + } + + System.out.println(""); + System.out.println("running test: publicNestedClassPrivateMethod"); + if(Boolean.TRUE.equals(test_publicNestedClassPrivateMethod())){ + System.out.println("passed"); + }else{ + System.out.println("FAILED"); + result = false; + } + + System.out.println(""); + return result; + + }catch(Exception e){ + System.out.println("Exception in Dispatcher_3 test:"); + e.printStackTrace(); + return false; + } + } + + public static void main(String[] args){ + System.exit(run() ? 
0 : 1); + } +} diff --git a/tester/javac/FunctionSignature_0.java b/tester/javac/FunctionSignature_0.java new file mode 100644 index 0000000..d45e2cc --- /dev/null +++ b/tester/javac/FunctionSignature_0.java @@ -0,0 +1,42 @@ +import com.ReasoningTechnology.Mosaic.Mosaic_AllMethodsPublicProxy; +import com.ReasoningTechnology.Mosaic.Mosaic_IO; +import com.ReasoningTechnology.Mosaic.Mosaic_Testbench; + +public class FunctionSignature_0 { + + public static class TestSuite { + + private static Mosaic_AllMethodsPublicProxy proxy; + + static { + try { + proxy = new Mosaic_AllMethodsPublicProxy("com.ReasoningTechnology.Mosaic.FunctionSignature"); + } catch (ClassNotFoundException e) { + System.err.println("Failed to initialize proxy: " + e.getMessage()); + } + } + + public Boolean smoke_test_0(Mosaic_IO io) { + try { + // Create a FunctionSignature instance via the proxy constructor + Object signature = proxy.construct("", "testMethod", new Class[]{}); + + // Call the toString method on the proxy instance + String result = (String) proxy.invoke(signature, "toString"); + + // Check expected output + return "testMethod()".equals(result); + } catch (Exception e) { + System.err.println("Test failed: " + e.getMessage()); + e.printStackTrace(); + return false; + } + } + } + + public static void main(String[] args) { + TestSuite suite = new TestSuite(); + int result = Mosaic_Testbench.run(suite); + System.exit(result); + } +} diff --git a/tester/javac/IO.java b/tester/javac/IO.java new file mode 100644 index 0000000..5615a09 --- /dev/null +++ b/tester/javac/IO.java @@ -0,0 +1,73 @@ +import com.ReasoningTechnology.Mosaic.Mosaic_IO; +import com.ReasoningTechnology.Mosaic.Mosaic_Quantifier; + +public class IO{ + + public static int fut(){ + try{ + // Echo some characters from stdin to stdout + System.out.print((char) System.in.read()); + System.out.print((char) System.in.read()); + + // Echo some more characters from stdin to stderr + System.err.print((char) System.in.read()); + System.err.print((char) System.in.read()); + + // Count remaining characters until EOF + int count = 0; + while(System.in.read() != -1){ + count++; + } + + return count; + } catch(Exception e){ + e.printStackTrace(); + return -1; // Error case + } + } + + public static int run(){ + Mosaic_IO io = new Mosaic_IO(); + Boolean[] condition = new Boolean[3]; + + // Redirect IO streams + io.redirect(); + + // Provide input for the function under test + io.push_input("abcdefg"); + + // Execute function under test + int result = fut(); + + // Check stdout content + String stdout_string = io.get_out_content(); + condition[0] = stdout_string.equals("ab"); + + // Check stderr content + String stderr_string = io.get_err_content(); + condition[1] = stderr_string.equals("cd"); + + // Check returned character count (3 remaining characters: 'e','f','g') + condition[2] = result == 3; + + // Restore original IO streams + io.restore(); + + if(!Mosaic_Quantifier.all(condition)){ + System.out.println("IO failed"); + return 1; + } + System.out.println("IO passed"); + return 0; + } + + // Main function to provide a shell interface for running tests + public static void main(String[] args){ + int return_code = run(); + System.exit(return_code); + return; + } + +} + + diff --git a/tester/javac/IsPrimitive.java b/tester/javac/IsPrimitive.java new file mode 100644 index 0000000..9b6aa2d --- /dev/null +++ b/tester/javac/IsPrimitive.java @@ -0,0 +1,118 @@ +/* -------------------------------------------------------------------------------- + Integration tests 
directly exercise Mosaic_IsPrimitive through the
+  Mosaic_Testbench harness. Each test method checks the type that
+  Mosaic_IsPrimitive reports for a value: primitives, boxed values, strings,
+  arrays, and other objects.
+*/
+import com.ReasoningTechnology.Mosaic.Mosaic_IO;
+import com.ReasoningTechnology.Mosaic.Mosaic_IsPrimitive;
+import com.ReasoningTechnology.Mosaic.Mosaic_Testbench;
+
+public class IsPrimitive{
+
+  public class TestSuite{
+
+    public Boolean test_int_type(Mosaic_IO io){
+      Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(42);
+      return mip.get_type().equals(int.class);
+    }
+
+    public Boolean test_boolean_type(Mosaic_IO io){
+      Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(true);
+      return mip.get_type().equals(boolean.class);
+    }
+
+    public Boolean test_double_type(Mosaic_IO io){
+      Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(3.14);
+      return mip.get_type().equals(double.class);
+    }
+
+    public Boolean test_string_type(Mosaic_IO io){
+      Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make("hello");
+      return mip.get_type().equals(String.class);
+    }
+
+    public Boolean test_object_type(Mosaic_IO io){
+      Object obj = new Object();
+      Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(obj);
+      return mip.get_type().equals(Object.class);
+    }
+
+    public Boolean test_char_type(Mosaic_IO io){
+      Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make('a');
+      return mip.get_type().equals(char.class);
+    }
+
+    public Boolean test_null_value(Mosaic_IO io){
+      try{
+        Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(null);
+        return mip.get_type() == null; // Should handle gracefully or throw
+      } catch (Exception e){
+        return false;
+      }
+    }
+
+    public Boolean test_empty_string(Mosaic_IO io){
+      Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make("");
+      return mip.get_type().equals(String.class);
+    }
+
+    public Boolean test_blank_string(Mosaic_IO io){
+      Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(" ");
+      return mip.get_type().equals(String.class);
+    }
+
+    // When passing arguments through Object types, there is no way
+    // for the callee to know if the caller sent a primitive type or a
+    // boxed value. This is the point of having IsPrimitive.
+    // IsPrimitive indicates that we really mean to send the primitive
+    // type, though it appears in the box.
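+    //
+    // Illustration (not part of the suite): a boxed and an unboxed 42 are
+    // indistinguishable once passed as Object, so only the wrapper records
+    // the caller's intent. Per the tests in this file:
+    //
+    //   Mosaic_IsPrimitive.make(42).get_type()                  // int.class
+    //   Mosaic_IsPrimitive.make(Integer.valueOf(42)).get_type() // int.class (test below)
+    //   Mosaic_IsPrimitive.make("42").get_type()                // String.class
+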
+ public Boolean test_primitive_wrapper(Mosaic_IO io){ + Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(Integer.valueOf(42)); + return mip.get_type().equals(int.class); + } + + public Boolean test_primitive_array(Mosaic_IO io){ + Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(new int[]{1, 2, 3}); + return mip.get_type().equals(int[].class); + } + + public Boolean test_object_array(Mosaic_IO io){ + Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(new String[]{"a", "b", "c"}); + return mip.get_type().equals(String[].class); + } + + public Boolean test_enum_type(Mosaic_IO io){ + enum TestEnum{ VALUE1, VALUE2 } + Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(TestEnum.VALUE1); + return mip.get_type().equals(TestEnum.class); + } + + public Boolean test_collection_type(Mosaic_IO io){ + java.util.List list = java.util.Arrays.asList(1, 2, 3); + Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(list); + return mip.get_type().getName().equals("java.util.Arrays$ArrayList"); + } + + public Boolean test_extreme_primitive_values(Mosaic_IO io){ + Mosaic_IsPrimitive mipMax = Mosaic_IsPrimitive.make(Integer.MAX_VALUE); + Mosaic_IsPrimitive mipMin = Mosaic_IsPrimitive.make(Integer.MIN_VALUE); + Mosaic_IsPrimitive mipNaN = Mosaic_IsPrimitive.make(Double.NaN); + return mipMax.get_type().equals(int.class) + && mipMin.get_type().equals(int.class) + && mipNaN.get_type().equals(double.class); + } + } + + public static void main(String[] args){ + TestSuite suite = new IsPrimitive().new TestSuite(); + int result = Mosaic_Testbench.run(suite); + System.exit(result); + } +} diff --git a/tester/javac/Logger.java b/tester/javac/Logger.java new file mode 100644 index 0000000..c4eb84f --- /dev/null +++ b/tester/javac/Logger.java @@ -0,0 +1,31 @@ +import com.ReasoningTechnology.Mosaic.Mosaic_IO; +import com.ReasoningTechnology.Mosaic.Mosaic_Logger; + +public class Logger{ + + public class TestSuite{ + public Boolean smoke_test_logging(Mosaic_IO io){ + try{ + Mosaic_Logger logger = new Mosaic_Logger(); + logger.message("smoke_test_logging", "This is a smoke test for logging."); + return true; + }catch (Exception e){ + e.printStackTrace(); + return false; + } + } + } + + public static void main(String[] args){ + TestSuite suite = new Logger().new TestSuite(); + boolean result = suite.smoke_test_logging(null); + + if(result){ + System.out.println("Test passed: 'smoke_test_logging'"); + System.exit(0); + }else{ + System.err.println("Test failed: 'smoke_test_logging'"); + System.exit(1); + } + } +} diff --git a/tester/javac/MockClass_0.java b/tester/javac/MockClass_0.java new file mode 100644 index 0000000..923661c --- /dev/null +++ b/tester/javac/MockClass_0.java @@ -0,0 +1,98 @@ +/* -------------------------------------------------------------------------------- + Integration tests directly simulate the use cases for Mosaic_Testbench. + Each test method validates a specific feature of Mosaic_Testbench ,including pass, + fail ,error handling ,and I/O interactions. 
+*/
+
+import java.util.Scanner;
+import com.ReasoningTechnology.Mosaic.Mosaic_IO;
+import com.ReasoningTechnology.Mosaic.Mosaic_Testbench;
+
+public class MockClass_0{
+
+  public class TestSuite{
+
+    public TestSuite() {
+      // no special initialization of data for this test
+    }
+
+    public Boolean test_failure_0(Mosaic_IO io){
+      return false;
+    }
+
+    // returns a non-Boolean
+    public Object test_failure_1(Mosaic_IO io){
+      return 1;
+    }
+
+    // has an uncaught error
+    public Boolean test_failure_2(Mosaic_IO io) throws Exception {
+      throw new Exception("Intentional exception for testing error handling");
+    }
+
+    // extraneous characters on stdout
+    public Boolean test_failure_3(Mosaic_IO io) throws Exception {
+      System.out.println("Intentional extraneous chars to stdout for testing");
+      return true;
+    }
+
+    // extraneous characters on stderr
+    public Boolean test_failure_4(Mosaic_IO io) throws Exception {
+      System.err.println("Intentional extraneous chars to stderr for testing.");
+      return true;
+    }
+
+    public Boolean test_success_0(Mosaic_IO io){
+      return true;
+    }
+
+    // pushing input for testing
+
+    public Boolean test_success_1(Mosaic_IO io){
+      io.push_input("input for the fut");
+
+      Scanner scanner = new Scanner(System.in);
+      String result = scanner.nextLine();
+      scanner.close();
+
+      Boolean flag = result.equals("input for the fut");
+      return flag;
+    }
+
+    // checking fut stdout
+    public Boolean test_success_2(Mosaic_IO io){
+      System.out.println("fut stdout"); // suppose the fut does this:
+      String peek_at_futs_output = io.get_out_content();
+      Boolean flag0 = io.has_out_content();
+      Boolean flag1 = peek_at_futs_output.equals("fut stdout\n");
+      io.clear_buffers(); // otherwise extraneous chars will cause a failure
+      return flag0 && flag1;
+    }
+
+    // checking fut stderr
+    public Boolean test_success_3(Mosaic_IO io){
+      System.err.print("fut stderr"); // suppose the fut does this:
+      String peek_at_futs_output = io.get_err_content();
+      Boolean flag0 = io.has_err_content();
+      Boolean flag1 = peek_at_futs_output.equals("fut stderr");
+      io.clear_buffers(); // otherwise extraneous chars will cause a failure
+      return flag0 && flag1;
+    }
+
+  }
+
+  public static void main(String[] args) {
+    MockClass_0 outer = new MockClass_0();
+    TestSuite suite = outer.new TestSuite(); // Non-static instantiation
+
+    /* for debug
+    Mosaic_IO io = new Mosaic_IO();
+    io.redirect();
+    suite.test_success_2(io);
+    */
+
+    int result = Mosaic_Testbench.run(suite); // Pass the suite instance to Mosaic_Testbench
+    System.exit(result);
+  }
+
+}
diff --git a/tester/javac/TestClasses_0.java b/tester/javac/TestClasses_0.java
new file mode 100644
index 0000000..f1dde59
--- /dev/null
+++ b/tester/javac/TestClasses_0.java
@@ -0,0 +1,59 @@
+package tester;
+
+/*
+  These are used for testing that Mosaic can be used for white box
+  testing. Mosaic tests for Mosaic itself access each of these as
+  part of regression. Users are welcome to also check accessing these
+  when debugging any access problems that might arise.
+*/ + +// Public class with public and private methods +public class TestClasses_0{ + public boolean a_public_method_1(){ + return true; + } + + private boolean a_private_method_2(){ + return true; + } + + public class APublicClass_01{ + public boolean a_public_method_3(){ + return true; + } + + private boolean a_private_method_4(){ + return true; + } + } + + private class APrivateClass_02{ + public boolean a_public_method_5(){ + return true; + } + + private boolean a_private_method_6(){ + return true; + } + } + + public static boolean a_public_static_method_7(){ + return true; + } + + private static boolean a_private_static_method_9(){ + return true; + } + +} + +// Default (package-private) class with public and private methods +class DefaultTestClass_01{ + public boolean a_public_method_7(){ + return true; + } + + private boolean a_private_method_8(){ + return true; + } +} diff --git a/tester/javac/TestClasses_1.java b/tester/javac/TestClasses_1.java new file mode 100644 index 0000000..7c425ce --- /dev/null +++ b/tester/javac/TestClasses_1.java @@ -0,0 +1,31 @@ +package tester; + +/* + These are used for testing that Mosaic can be used for white box + testing. Mosaic tests for Mosaic itself access each of these as + part of regression. Users are welcome to also check accessing these + when debugging any access problems that might arise. +*/ + +// Public class with public and private methods +public class TestClasses_1 { + + private int i; + + public TestClasses_1(){ + i = 0; + } + + public TestClasses_1(int a){ + i = a; + } + + public TestClasses_1(int a ,int b){ + i = a + b; + } + + public int get_i() { + return i; + } + +} diff --git a/tester/javac/TestClasses_2.java b/tester/javac/TestClasses_2.java new file mode 100644 index 0000000..42a6e82 --- /dev/null +++ b/tester/javac/TestClasses_2.java @@ -0,0 +1,43 @@ +package tester; + +public class TestClasses_2{ + + // Static fields + public static int i_200; + private static String s_201; + + // Instance fields + public Integer i_202; + private Integer i_203; + + // Nested class + public static class Class_Nested_21{ + public static Integer i_210; + private static String s_211; + + public Integer i_212; + private Integer i_213; + + public static void initialize_static_fields(){ + i_210=210; + s_211="Static Nested Private String"; + } + + public void initialize_instance_fields(){ + i_212=212; + i_213=213; + } + } + + public static void initialize_static_fields(){ + i_200=200; + s_201="Static Private String"; + } + + public void initialize_instance_fields(){ + i_202=202; + i_203=203; + } +} + + diff --git a/tester/javac/Testbench.java b/tester/javac/Testbench.java new file mode 100644 index 0000000..070365e --- /dev/null +++ b/tester/javac/Testbench.java @@ -0,0 +1,82 @@ +import java.lang.reflect.Method; +import com.ReasoningTechnology.Mosaic.Mosaic_IO; +import com.ReasoningTechnology.Mosaic.Mosaic_Testbench; + +public class Testbench { + + /* -------------------------------------------------------------------------------- + Test methods to validate Testbench functionality + Each method tests a specific aspect of the Testbench class, with a focus on + ensuring that well-formed and ill-formed test cases are correctly identified + and handled. 
+ */ + + // Tests if a correctly formed method is recognized as well-formed by Testbench + public static Boolean test_method_is_wellformed_0(Mosaic_IO io) { + try { + Method validMethod = Testbench.class.getMethod("dummy_test_method", Mosaic_IO.class); + return Boolean.TRUE.equals(Mosaic_Testbench.method_is_wellformed(validMethod)); + } catch (NoSuchMethodException e) { + return false; + } + } + + // Tests if a method with an invalid return type is identified as malformed by Testbench + public static Boolean test_method_is_wellformed_1(Mosaic_IO io) { + System.out.println("Expected output: Structural problem message for dummy_invalid_return_method."); + try { + Method invalidReturnMethod = Testbench.class.getMethod("dummy_invalid_return_method", Mosaic_IO.class); + return Boolean.FALSE.equals(Mosaic_Testbench.method_is_wellformed(invalidReturnMethod)); + } catch (NoSuchMethodException e) { + return false; + } + } + + // Tests if a valid test method runs successfully with the Testbench + public static Boolean test_run_test_0(Mosaic_IO io) { + try { + Method validMethod = Testbench.class.getMethod("dummy_test_method", Mosaic_IO.class); + return Boolean.TRUE.equals(Mosaic_Testbench.run_test(new Testbench(), validMethod, io)); + } catch (NoSuchMethodException e) { + return false; + } + } + + /* Dummy methods for testing */ + public Boolean dummy_test_method(Mosaic_IO io) { + return true; // Simulates a passing test case + } + + public void dummy_invalid_return_method(Mosaic_IO io) { + // Simulates a test case with an invalid return type + } + + /* -------------------------------------------------------------------------------- + Manually run all tests and summarize results without using Testbench itself. + Each test's name is printed if it fails, and only pass/fail counts are summarized. + */ + public static int run() { + int passed_tests = 0; + int failed_tests = 0; + Mosaic_IO io = new Mosaic_IO(); + + if (test_method_is_wellformed_0(io)) passed_tests++; else { System.out.println("test_method_is_wellformed_0"); failed_tests++; } + if (test_method_is_wellformed_1(io)) passed_tests++; else { System.out.println("test_method_is_wellformed_1"); failed_tests++; } + if (test_run_test_0(io)) passed_tests++; else { System.out.println("test_run_test_0"); failed_tests++; } + + // Summary for all the tests + System.out.println("Testbench Total tests run: " + (passed_tests + failed_tests)); + System.out.println("Testbench Total tests passed: " + passed_tests); + System.out.println("Testbench Total tests failed: " + failed_tests); + + return (failed_tests > 0) ? 1 : 0; + } + + /* -------------------------------------------------------------------------------- + Main method for shell interface, sets the exit status based on test results + */ + public static void main(String[] args) { + int exitCode = run(); + System.exit(exitCode); + } +} diff --git a/tester/javac/Util.java b/tester/javac/Util.java new file mode 100644 index 0000000..78e30c9 --- /dev/null +++ b/tester/javac/Util.java @@ -0,0 +1,82 @@ +import com.ReasoningTechnology.Mosaic.Mosaic_Quantifier; + +/* +Util + +*/ + +public class Util{ + + public static Boolean test_all(){ + // Test with zero condition + Boolean[] condition0 = {}; + Boolean result = !Mosaic_Quantifier.all(condition0); // Empty condition list is false. 
+
+  // Test with one condition
+  Boolean[] condition1_true = {true};
+  Boolean[] condition1_false = {false};
+  result &= Mosaic_Quantifier.all(condition1_true); // should return true
+  result &= !Mosaic_Quantifier.all(condition1_false); // should return false
+
+  // Test with two conditions
+  Boolean[] condition2_true = {true, true};
+  Boolean[] condition2_false1 = {true, false};
+  Boolean[] condition2_false2 = {false, true};
+  Boolean[] condition2_false3 = {false, false};
+  result &= Mosaic_Quantifier.all(condition2_true); // should return true
+  result &= !Mosaic_Quantifier.all(condition2_false1); // should return false
+  result &= !Mosaic_Quantifier.all(condition2_false2); // should return false
+  result &= !Mosaic_Quantifier.all(condition2_false3); // should return false
+
+  // Test with three conditions
+  Boolean[] condition3_false1 = {true, true, false};
+  Boolean[] condition3_true = {true, true, true};
+  Boolean[] condition3_false2 = {true, false, true};
+  Boolean[] condition3_false3 = {false, true, true};
+  Boolean[] condition3_false4 = {false, false, false};
+  result &= !Mosaic_Quantifier.all(condition3_false1); // should return false
+  result &= Mosaic_Quantifier.all(condition3_true); // should return true
+  result &= !Mosaic_Quantifier.all(condition3_false2); // should return false
+  result &= !Mosaic_Quantifier.all(condition3_false3); // should return false
+  result &= !Mosaic_Quantifier.all(condition3_false4); // should return false
+
+  return result;
+ }
+
+ public static Boolean test_all_set_false(){
+  Boolean[] condition_list = {true, true, true};
+  Mosaic_Quantifier.all_set_false(condition_list);
+  return !condition_list[0] && !condition_list[1] && !condition_list[2];
+ }
+
+ public static Boolean test_all_set_true(){
+  Boolean[] condition_list = {false, false, false};
+  Mosaic_Quantifier.all_set_true(condition_list);
+  return condition_list[0] && condition_list[1] && condition_list[2];
+ }
+
+ public static int run(){
+  Boolean[] condition_list = new Boolean[3];
+  condition_list[0] = test_all();
+  condition_list[1] = test_all_set_false();
+  condition_list[2] = test_all_set_true();
+
+  if(
+   !condition_list[0]
+   || !condition_list[1]
+   || !condition_list[2]
+  ){
+   System.out.println("Util failed");
+   return 1;
+  }
+  System.out.println("Util passed");
+  return 0;
+ }
+
+ // Main function to provide a shell interface for running tests
+ public static void main(String[] args){
+  int return_code = run();
+  System.exit(return_code);
+  return;
+ }
+}
diff --git a/tester/javac/smoke.java b/tester/javac/smoke.java
new file mode 100644
index 0000000..567b8f6
--- /dev/null
+++ b/tester/javac/smoke.java
@@ -0,0 +1,34 @@
+import com.ReasoningTechnology.Mosaic.Mosaic_Quantifier;
+
+/*
+Plug it in, see if there is smoke. There usually is.
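+
+(Editorial note: this is the smoke test, a minimal end-to-end check that the
+classpath and the Mosaic_Quantifier linkage work at all; the 'list' tool runs
+it first, before the larger suites.)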
+ +*/ + +public class smoke{ + + public static Boolean test_is_true(){ + return true; + } + + public static int run(){ + Boolean[] condition = new Boolean[1]; + condition[0] = test_is_true(); + + int i = 0; + if( !Mosaic_Quantifier.all(condition) ){ + System.out.println("Test0 failed"); + return 1; + } + System.out.println("Test0 passed"); + return 0; + } + + // Main function to provide a shell interface for running tests + public static void main(String[] args){ + int return_code = run(); + System.exit(return_code); + return; + } + +} diff --git "a/tester/javac\360\237\226\211/Dispatcher_0.java" "b/tester/javac\360\237\226\211/Dispatcher_0.java" deleted file mode 100644 index 7f4e1f2..0000000 --- "a/tester/javac\360\237\226\211/Dispatcher_0.java" +++ /dev/null @@ -1,167 +0,0 @@ -import com.ReasoningTechnology.Mosaic.Mosaic_Dispatcher; -import com.ReasoningTechnology.Mosaic.Mosaic_IsPrimitive; -import com.ReasoningTechnology.Mosaic.Mosaic_Quantifier; - -import tester.TestClasses_0; -import tester.TestClasses_1; - -public class Dispatcher_0{ - - private static Mosaic_Dispatcher dispatcher; - - static{ - // Initialize the dispatcher for TestClasses_0 - Mosaic_Dispatcher.test_switch(true); - } - - public Dispatcher_0(){ - Mosaic_Dispatcher.test_print("making map for TestClasses_0"); - dispatcher = new Mosaic_Dispatcher(TestClasses_0.class); - } - - // Test method to access the public method of the public class - public static boolean test_publicClass_publicMethod(){ - Object instance = new TestClasses_0(); - boolean result = dispatcher.dispatch - ( - instance // target instance - ,boolean.class // return type - ,"a_public_method_1" // method name - ); - - return result; - } - - public static boolean test_make_0(){ - Boolean[] condition_list = new Boolean[4]; - Mosaic_Quantifier.all_set_false(condition_list); - int i = 0; - - Mosaic_Dispatcher d1 = new Mosaic_Dispatcher(TestClasses_1.class); - - TestClasses_1 tc0 = new TestClasses_1(); - condition_list[i++] = tc0.get_i() == 0; - - TestClasses_1 tc1 = (TestClasses_1) d1.make(); - condition_list[i++] = tc1.get_i() == 0; - - TestClasses_1 tc2 = (TestClasses_1) d1.make(new Mosaic_IsPrimitive(7)); - condition_list[i++] = tc2.get_i() == 7; - - TestClasses_1 tc3 = (TestClasses_1) d1.make(new Mosaic_IsPrimitive(21) ,new Mosaic_IsPrimitive(17) ); - condition_list[i++] = tc3.get_i() == 38; - - return Mosaic_Quantifier.all(condition_list); - } - - // Test public static method - public static boolean test_publicStaticMethod_7(){ - boolean result = dispatcher.dispatch( - boolean.class, // return type - "a_public_static_method_7" // method name - ); - return result; - } - - // Test private static method - public static boolean test_privateStaticMethod_9(){ - boolean result = dispatcher.dispatch( - boolean.class, // return type - "a_private_static_method_9" // method name - ); - return result; - } - - public static boolean test_defaultClass_access(){ - try{ - Mosaic_Dispatcher d2=new Mosaic_Dispatcher("tester.DefaultTestClass_01"); - Object instance=d2.make(); - boolean result1=d2.dispatch( - instance // target instance - ,boolean.class // return type - ,"a_public_method_7" // public method name - ); - - boolean result2=d2.dispatch( - instance // target instance - ,boolean.class // return type - ,"a_private_method_8" // private method name - ); - - return result1 && result2; - }catch(Throwable t){ - t.printStackTrace(); - return false; - } - } - - // Extend the run method to include static method tests - public static boolean run(){ - try{ - boolean result = 
true; - - System.out.println(""); - System.out.println("running test: publicClass_publicMethod"); - if (Boolean.TRUE.equals(test_publicClass_publicMethod())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result = false; - } - - System.out.println(""); - System.out.println("running test: make_0"); - if (Boolean.TRUE.equals(test_make_0())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result = false; - } - - System.out.println(""); - System.out.println("running test: publicStaticMethod_7"); - if (Boolean.TRUE.equals(test_publicStaticMethod_7())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result = false; - } - - System.out.println(""); - System.out.println("running test: privateStaticMethod_9"); - if (Boolean.TRUE.equals(test_privateStaticMethod_9())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result = false; - } - - System.out.println(""); - System.out.println("running test: defaultClass_access"); - if(Boolean.TRUE.equals(test_defaultClass_access())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result=false; - } - - System.out.println(""); - return result; - - }catch (Exception e){ - System.out.println("Exception in Dispatcher_0 test:"); - e.printStackTrace(); - return false; - } - } - - public static void main(String[] args){ - // Execute the run method and return its result as the exit code - new Dispatcher_0(); - if( run() ) - System.exit(0); - else - System.exit(1); - } - -} diff --git "a/tester/javac\360\237\226\211/Dispatcher_1.java" "b/tester/javac\360\237\226\211/Dispatcher_1.java" deleted file mode 100644 index 4758510..0000000 --- "a/tester/javac\360\237\226\211/Dispatcher_1.java" +++ /dev/null @@ -1,111 +0,0 @@ -import com.ReasoningTechnology.Mosaic.Mosaic_Dispatcher; -import com.ReasoningTechnology.Mosaic.Mosaic_Quantifier; - -import tester.TestClasses_0; - -public class Dispatcher_1{ - - private static Mosaic_Dispatcher dispatcher; - - static{ - dispatcher = new Mosaic_Dispatcher(TestClasses_0.class); - } - - public static boolean test_publicMethod_1(){ - TestClasses_0 instance = new TestClasses_0(); - return dispatcher.dispatch(instance, boolean.class, "a_public_method_1"); - } - - public static boolean test_privateMethod_2(){ - TestClasses_0 instance = new TestClasses_0(); - return dispatcher.dispatch(instance, boolean.class, "a_private_method_2"); - } - - public static boolean test_nestedPublicMethod_3(){ - try{ - TestClasses_0 outer = new TestClasses_0(); - TestClasses_0.APublicClass_01 nested = outer.new APublicClass_01(); - Mosaic_Dispatcher nested_dispatcher = new Mosaic_Dispatcher(TestClasses_0.APublicClass_01.class); - return nested_dispatcher.dispatch(nested, boolean.class, "a_public_method_3"); - } catch(Exception e){ - e.printStackTrace(); - return false; - } - } - - public static boolean test_nestedPrivateMethod_4(){ - try{ - TestClasses_0 outer = new TestClasses_0(); - TestClasses_0.APublicClass_01 nested = outer.new APublicClass_01(); - Mosaic_Dispatcher nested_dispatcher = new Mosaic_Dispatcher(TestClasses_0.APublicClass_01.class); - return nested_dispatcher.dispatch(nested, boolean.class, "a_private_method_4"); - } catch(Exception e){ - e.printStackTrace(); - return false; - } - } - - public static boolean run(){ - try{ - boolean result = true; - - System.out.println(""); - System.out.println("running test: publicMethod_1"); - if(Boolean.TRUE.equals(test_publicMethod_1())){ - System.out.println("passed"); - 
}else{ - System.out.println("FAILED"); - result = false; - } - - System.out.println(""); - System.out.println("running test: privateMethod_2"); - if(Boolean.TRUE.equals(test_privateMethod_2())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result = false; - } - - System.out.println(""); - System.out.println("running test: nestedPublicMethod_3"); - if(Boolean.TRUE.equals(test_nestedPublicMethod_3())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result = false; - } - - System.out.println(""); - System.out.println("running test: nestedPrivateMethod_4"); - if(Boolean.TRUE.equals(test_nestedPrivateMethod_4())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result = false; - } - - System.out.println(""); - return result; - - }catch(Exception e){ - System.out.println("Exception in Dispatcher_1 test:"); - e.printStackTrace(); - return false; - } - } - - private static boolean logPass(){ - System.out.println("passed"); - return true; - } - - private static boolean logFail(){ - System.out.println("FAILED"); - return false; - } - - public static void main(String[] args){ - System.exit(run() ? 0 : 1); - } -} diff --git "a/tester/javac\360\237\226\211/Dispatcher_2.java" "b/tester/javac\360\237\226\211/Dispatcher_2.java" deleted file mode 100644 index cb1bb97..0000000 --- "a/tester/javac\360\237\226\211/Dispatcher_2.java" +++ /dev/null @@ -1,199 +0,0 @@ -import com.ReasoningTechnology.Mosaic.Mosaic_Dispatcher; - -import tester.TestClasses_2; - -public class Dispatcher_2{ - - private static Mosaic_Dispatcher dispatcher; - - static{ - TestClasses_2.initialize_static_fields(); - dispatcher=new Mosaic_Dispatcher(TestClasses_2.class); - } - - public static boolean test_publicStaticField(){ - try{ - Integer value=dispatcher.read("i_200"); - return value != null && value == 200; - }catch(Throwable t){ - t.printStackTrace(); - return false; - } - } - - public static boolean test_privateStaticField(){ - try{ - String value=dispatcher.read("s_201"); - return value != null && value.equals("Static Private String"); - }catch(Throwable t){ - t.printStackTrace(); - return false; - } - } - - public static boolean test_publicInstanceField(){ - try{ - TestClasses_2 instance=dispatcher.make(); - instance.initialize_instance_fields(); - Integer value=dispatcher.read(instance,"i_202"); - return value != null && value == 202; - }catch(Throwable t){ - t.printStackTrace(); - return false; - } - } - - public static boolean test_privateInstanceField(){ - try{ - TestClasses_2 instance=dispatcher.make(); - instance.initialize_instance_fields(); - Integer value=dispatcher.read(instance,"i_203"); - return value != null && value == 203; - }catch(Throwable t){ - t.printStackTrace(); - return false; - } - } - - public static boolean test_writePublicStaticField(){ - try{ - dispatcher.write("i_200",300); - Integer value=dispatcher.read("i_200"); - return value != null && value == 300; - }catch(Throwable t){ - t.printStackTrace(); - return false; - } - } - - public static boolean test_writePrivateStaticField(){ - try{ - dispatcher.write("s_201","New Static Private String"); - String value=dispatcher.read("s_201"); - return value != null && value.equals("New Static Private String"); - }catch(Throwable t){ - t.printStackTrace(); - return false; - } - } - - public static boolean test_writePublicInstanceField(){ - try{ - TestClasses_2 instance=dispatcher.make(); - dispatcher.write(instance,"i_202",400); - Integer value=dispatcher.read(instance,"i_202"); 
- return value != null && value == 400; - }catch(Throwable t){ - t.printStackTrace(); - return false; - } - } - - public static boolean test_writePrivateInstanceField(){ - try{ - TestClasses_2 instance=dispatcher.make(); - dispatcher.write(instance,"i_203",500); - Integer value=dispatcher.read(instance,"i_203"); - return value != null && value == 500; - }catch(Throwable t){ - t.printStackTrace(); - return false; - } - } - - public static boolean run(){ - try{ - boolean result=true; - - // Existing read tests - System.out.println(""); - System.out.println("running test: publicStaticField"); - if(Boolean.TRUE.equals(test_publicStaticField())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result=false; - } - - System.out.println(""); - System.out.println("running test: privateStaticField"); - if(Boolean.TRUE.equals(test_privateStaticField())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result=false; - } - - System.out.println(""); - System.out.println("running test: publicInstanceField"); - if(Boolean.TRUE.equals(test_publicInstanceField())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result=false; - } - - System.out.println(""); - System.out.println("running test: privateInstanceField"); - if(Boolean.TRUE.equals(test_privateInstanceField())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result=false; - } - - // New write tests - System.out.println(""); - System.out.println("running test: writePublicStaticField"); - if(Boolean.TRUE.equals(test_writePublicStaticField())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result=false; - } - - System.out.println(""); - System.out.println("running test: writePrivateStaticField"); - if(Boolean.TRUE.equals(test_writePrivateStaticField())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result=false; - } - - System.out.println(""); - System.out.println("running test: writePublicInstanceField"); - if(Boolean.TRUE.equals(test_writePublicInstanceField())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result=false; - } - - System.out.println(""); - System.out.println("running test: writePrivateInstanceField"); - if(Boolean.TRUE.equals(test_writePrivateInstanceField())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result=false; - } - - System.out.println(""); - return result; - - }catch(Exception e){ - System.out.println("Exception in Dispatcher_2 test:"); - e.printStackTrace(); - return false; - } - } - - public static void main(String[] args){ - if(run()){ - System.exit(0); - }else{ - System.exit(1); - } - } -} diff --git "a/tester/javac\360\237\226\211/Dispatcher_3.java" "b/tester/javac\360\237\226\211/Dispatcher_3.java" deleted file mode 100644 index e79a1af..0000000 --- "a/tester/javac\360\237\226\211/Dispatcher_3.java" +++ /dev/null @@ -1,132 +0,0 @@ -import com.ReasoningTechnology.Mosaic.Mosaic_Dispatcher; - -import tester.TestClasses_0; - -public class Dispatcher_3{ - - public static boolean test_privateNestedClassPublicMethod(){ - try{ - TestClasses_0 outer_instance = new TestClasses_0(); - Mosaic_Dispatcher nested_dispatcher = new Mosaic_Dispatcher("tester.TestClasses_0$APrivateClass_02"); - Object nested_instance = nested_dispatcher.make(new Object[]{outer_instance}); - boolean result = nested_dispatcher.dispatch( - nested_instance - ,boolean.class - ,"a_public_method_5" - ); - return result; - 
}catch(Throwable t){ - System.out.println("Exception in test_privateNestedClassPublicMethod:"); - t.printStackTrace(); - return false; - } - } - - public static boolean test_privateNestedClassPrivateMethod(){ - try{ - TestClasses_0 outer_instance = new TestClasses_0(); - Mosaic_Dispatcher nested_dispatcher = new Mosaic_Dispatcher("tester.TestClasses_0$APrivateClass_02"); - Object nested_instance = nested_dispatcher.make(new Object[]{outer_instance}); - boolean result = nested_dispatcher.dispatch( - nested_instance - ,boolean.class - ,"a_private_method_6" - ); - return result; - }catch(Throwable t){ - System.out.println("Exception in test_privateNestedClassPrivateMethod:"); - t.printStackTrace(); - return false; - } - } - - public static boolean test_publicNestedClassPublicMethod(){ - try{ - TestClasses_0 outer = new TestClasses_0(); - TestClasses_0.APublicClass_01 nested_instance = outer.new APublicClass_01(); - Mosaic_Dispatcher nested_dispatcher = new Mosaic_Dispatcher(TestClasses_0.APublicClass_01.class); - boolean result = nested_dispatcher.dispatch( - nested_instance - ,boolean.class - ,"a_public_method_3" - ); - return result; - }catch(Throwable t){ - System.out.println("Exception in test_publicNestedClassPublicMethod:"); - t.printStackTrace(); - return false; - } - } - - public static boolean test_publicNestedClassPrivateMethod(){ - try{ - TestClasses_0 outer = new TestClasses_0(); - TestClasses_0.APublicClass_01 nested_instance = outer.new APublicClass_01(); - Mosaic_Dispatcher nested_dispatcher = new Mosaic_Dispatcher(TestClasses_0.APublicClass_01.class); - boolean result = nested_dispatcher.dispatch( - nested_instance - ,boolean.class - ,"a_private_method_4" - ); - return result; - }catch(Throwable t){ - System.out.println("Exception in test_publicNestedClassPrivateMethod:"); - t.printStackTrace(); - return false; - } - } - - public static boolean run(){ - try{ - boolean result = true; - - System.out.println(""); - System.out.println("running test: privateNestedClassPublicMethod"); - if(Boolean.TRUE.equals(test_privateNestedClassPublicMethod())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result = false; - } - - System.out.println(""); - System.out.println("running test: privateNestedClassPrivateMethod"); - if(Boolean.TRUE.equals(test_privateNestedClassPrivateMethod())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result = false; - } - - System.out.println(""); - System.out.println("running test: publicNestedClassPublicMethod"); - if(Boolean.TRUE.equals(test_publicNestedClassPublicMethod())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result = false; - } - - System.out.println(""); - System.out.println("running test: publicNestedClassPrivateMethod"); - if(Boolean.TRUE.equals(test_publicNestedClassPrivateMethod())){ - System.out.println("passed"); - }else{ - System.out.println("FAILED"); - result = false; - } - - System.out.println(""); - return result; - - }catch(Exception e){ - System.out.println("Exception in Dispatcher_3 test:"); - e.printStackTrace(); - return false; - } - } - - public static void main(String[] args){ - System.exit(run() ? 
0 : 1); - } -} diff --git "a/tester/javac\360\237\226\211/FunctionSignature_0.java" "b/tester/javac\360\237\226\211/FunctionSignature_0.java" deleted file mode 100644 index d45e2cc..0000000 --- "a/tester/javac\360\237\226\211/FunctionSignature_0.java" +++ /dev/null @@ -1,42 +0,0 @@ -import com.ReasoningTechnology.Mosaic.Mosaic_AllMethodsPublicProxy; -import com.ReasoningTechnology.Mosaic.Mosaic_IO; -import com.ReasoningTechnology.Mosaic.Mosaic_Testbench; - -public class FunctionSignature_0 { - - public static class TestSuite { - - private static Mosaic_AllMethodsPublicProxy proxy; - - static { - try { - proxy = new Mosaic_AllMethodsPublicProxy("com.ReasoningTechnology.Mosaic.FunctionSignature"); - } catch (ClassNotFoundException e) { - System.err.println("Failed to initialize proxy: " + e.getMessage()); - } - } - - public Boolean smoke_test_0(Mosaic_IO io) { - try { - // Create a FunctionSignature instance via the proxy constructor - Object signature = proxy.construct("", "testMethod", new Class[]{}); - - // Call the toString method on the proxy instance - String result = (String) proxy.invoke(signature, "toString"); - - // Check expected output - return "testMethod()".equals(result); - } catch (Exception e) { - System.err.println("Test failed: " + e.getMessage()); - e.printStackTrace(); - return false; - } - } - } - - public static void main(String[] args) { - TestSuite suite = new TestSuite(); - int result = Mosaic_Testbench.run(suite); - System.exit(result); - } -} diff --git "a/tester/javac\360\237\226\211/IO.java" "b/tester/javac\360\237\226\211/IO.java" deleted file mode 100644 index 5615a09..0000000 --- "a/tester/javac\360\237\226\211/IO.java" +++ /dev/null @@ -1,73 +0,0 @@ -import com.ReasoningTechnology.Mosaic.Mosaic_IO; -import com.ReasoningTechnology.Mosaic.Mosaic_Quantifier; - -public class IO{ - - public static int fut(){ - try{ - // Echo some characters from stdin to stdout - System.out.print((char) System.in.read()); - System.out.print((char) System.in.read()); - - // Echo some more characters from stdin to stderr - System.err.print((char) System.in.read()); - System.err.print((char) System.in.read()); - - // Count remaining characters until EOF - int count = 0; - while(System.in.read() != -1){ - count++; - } - - return count; - } catch(Exception e){ - e.printStackTrace(); - return -1; // Error case - } - } - - public static int run(){ - Mosaic_IO io = new Mosaic_IO(); - Boolean[] condition = new Boolean[3]; - - // Redirect IO streams - io.redirect(); - - // Provide input for the function under test - io.push_input("abcdefg"); - - // Execute function under test - int result = fut(); - - // Check stdout content - String stdout_string = io.get_out_content(); - condition[0] = stdout_string.equals("ab"); - - // Check stderr content - String stderr_string = io.get_err_content(); - condition[1] = stderr_string.equals("cd"); - - // Check returned character count (3 remaining characters: 'e','f','g') - condition[2] = result == 3; - - // Restore original IO streams - io.restore(); - - if(!Mosaic_Quantifier.all(condition)){ - System.out.println("IO failed"); - return 1; - } - System.out.println("IO passed"); - return 0; - } - - // Main function to provide a shell interface for running tests - public static void main(String[] args){ - int return_code = run(); - System.exit(return_code); - return; - } - -} - - diff --git "a/tester/javac\360\237\226\211/IsPrimitive.java" "b/tester/javac\360\237\226\211/IsPrimitive.java" deleted file mode 100644 index 9b6aa2d..0000000 --- 
"a/tester/javac\360\237\226\211/IsPrimitive.java" +++ /dev/null @@ -1,118 +0,0 @@ -/* -------------------------------------------------------------------------------- - Integration tests directly simulate the use cases for Mosaic_Testbench. - Each test method validates a specific feature of Mosaic_Testbench ,including pass, - fail ,error handling ,and I/O interactions. -*/ -import java.util.Scanner; - -import com.ReasoningTechnology.Mosaic.Mosaic_IO; -import com.ReasoningTechnology.Mosaic.Mosaic_IsPrimitive; -import com.ReasoningTechnology.Mosaic.Mosaic_Testbench; - -import com.ReasoningTechnology.Mosaic.Mosaic_IO; -import com.ReasoningTechnology.Mosaic.Mosaic_Testbench; -import com.ReasoningTechnology.Mosaic.Mosaic_IsPrimitive; - -public class IsPrimitive{ - - public class TestSuite{ - - - public Boolean test_int_type(Mosaic_IO io){ - Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(42); - return mip.get_type().equals(int.class); - } - - public Boolean test_boolean_type(Mosaic_IO io){ - Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(true); - return mip.get_type().equals(boolean.class); - } - - public Boolean test_double_type(Mosaic_IO io){ - Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(3.14); - return mip.get_type().equals(double.class); - } - - public Boolean test_string_type(Mosaic_IO io){ - Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make("hello"); - return mip.get_type().equals(String.class); - } - - public Boolean test_object_type(Mosaic_IO io){ - Object obj = new Object(); - Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(obj); - return mip.get_type().equals(Object.class); - } - - public Boolean test_char_type(Mosaic_IO io){ - Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make('a'); - return mip.get_type().equals(char.class); - } - - public Boolean test_null_value(Mosaic_IO io){ - try{ - Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(null); - return mip.get_type() == null; // Should handle gracefully or throw - } catch (Exception e){ - return false; - } - } - - public Boolean test_empty_string(Mosaic_IO io){ - Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(""); - return mip.get_type().equals(String.class); - } - - public Boolean test_blank_string(Mosaic_IO io){ - Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(" "); - return mip.get_type().equals(String.class); - } - - // When passing arguments through Object types, there is no way - // for the callee to know if the caller sent a primitive type or a - // boxed value. This is the point of having IsPrimitive. - // IsPrimitive indicates that we really mean to send the primitive - // type, though it appears in the box. 
- public Boolean test_primitive_wrapper(Mosaic_IO io){ - Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(Integer.valueOf(42)); - return mip.get_type().equals(int.class); - } - - public Boolean test_primitive_array(Mosaic_IO io){ - Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(new int[]{1, 2, 3}); - return mip.get_type().equals(int[].class); - } - - public Boolean test_object_array(Mosaic_IO io){ - Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(new String[]{"a", "b", "c"}); - return mip.get_type().equals(String[].class); - } - - public Boolean test_enum_type(Mosaic_IO io){ - enum TestEnum{ VALUE1, VALUE2 } - Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(TestEnum.VALUE1); - return mip.get_type().equals(TestEnum.class); - } - - public Boolean test_collection_type(Mosaic_IO io){ - java.util.List list = java.util.Arrays.asList(1, 2, 3); - Mosaic_IsPrimitive mip = Mosaic_IsPrimitive.make(list); - return mip.get_type().getName().equals("java.util.Arrays$ArrayList"); - } - - public Boolean test_extreme_primitive_values(Mosaic_IO io){ - Mosaic_IsPrimitive mipMax = Mosaic_IsPrimitive.make(Integer.MAX_VALUE); - Mosaic_IsPrimitive mipMin = Mosaic_IsPrimitive.make(Integer.MIN_VALUE); - Mosaic_IsPrimitive mipNaN = Mosaic_IsPrimitive.make(Double.NaN); - return mipMax.get_type().equals(int.class) - && mipMin.get_type().equals(int.class) - && mipNaN.get_type().equals(double.class); - } - } - - public static void main(String[] args){ - TestSuite suite = new IsPrimitive().new TestSuite(); - int result = Mosaic_Testbench.run(suite); - System.exit(result); - } -} diff --git "a/tester/javac\360\237\226\211/Logger.java" "b/tester/javac\360\237\226\211/Logger.java" deleted file mode 100644 index c4eb84f..0000000 --- "a/tester/javac\360\237\226\211/Logger.java" +++ /dev/null @@ -1,31 +0,0 @@ -import com.ReasoningTechnology.Mosaic.Mosaic_IO; -import com.ReasoningTechnology.Mosaic.Mosaic_Logger; - -public class Logger{ - - public class TestSuite{ - public Boolean smoke_test_logging(Mosaic_IO io){ - try{ - Mosaic_Logger logger = new Mosaic_Logger(); - logger.message("smoke_test_logging", "This is a smoke test for logging."); - return true; - }catch (Exception e){ - e.printStackTrace(); - return false; - } - } - } - - public static void main(String[] args){ - TestSuite suite = new Logger().new TestSuite(); - boolean result = suite.smoke_test_logging(null); - - if(result){ - System.out.println("Test passed: 'smoke_test_logging'"); - System.exit(0); - }else{ - System.err.println("Test failed: 'smoke_test_logging'"); - System.exit(1); - } - } -} diff --git "a/tester/javac\360\237\226\211/MockClass_0.java" "b/tester/javac\360\237\226\211/MockClass_0.java" deleted file mode 100644 index 923661c..0000000 --- "a/tester/javac\360\237\226\211/MockClass_0.java" +++ /dev/null @@ -1,98 +0,0 @@ -/* -------------------------------------------------------------------------------- - Integration tests directly simulate the use cases for Mosaic_Testbench. - Each test method validates a specific feature of Mosaic_Testbench ,including pass, - fail ,error handling ,and I/O interactions. 
-*/ - -import java.util.Scanner; -import com.ReasoningTechnology.Mosaic.Mosaic_IO; -import com.ReasoningTechnology.Mosaic.Mosaic_Testbench; - -public class MockClass_0{ - - public class TestSuite{ - - public TestSuite() { - // no special initialization of data for this test - } - - public Boolean test_failure_0(Mosaic_IO io){ - return false; - } - - // returns a non-Boolean - public Object test_failure_1(Mosaic_IO io){ - return 1; - } - - // has an uncaught error - public Boolean test_failure_2(Mosaic_IO io) throws Exception { - throw new Exception("Intentional exception for testing error handling"); - } - - // extraneous characters on stdout - public Boolean test_failure_3(Mosaic_IO io) throws Exception { - System.out.println("Intentional extraneous chars to stdout for testing"); - return true; - } - - // extraneous characters on stderr - public Boolean test_failure_4(Mosaic_IO io) throws Exception { - System.err.println("Intentional extraneous chars to stderr for testing."); - return true; - } - - public Boolean test_success_0(Mosaic_IO io){ - return true; - } - - // pushing input for testing - - public Boolean test_success_1(Mosaic_IO io){ - io.push_input("input for the fut"); - - Scanner scanner = new Scanner(System.in); - String result = scanner.nextLine(); - scanner.close(); - - Boolean flag = result.equals("input for the fut"); - return flag; - } - - // checking fut stdout - public Boolean test_success_2(Mosaic_IO io){ - System.out.println("fut stdout"); // suppose the fut does this: - String peek_at_futs_output = io.get_out_content(); - Boolean flag0 = io.has_out_content(); - Boolean flag1 = peek_at_futs_output.equals("fut stdout\n"); - io.clear_buffers(); // otherwise extraneous chars will cause an fail - return flag0 && flag1; - } - - // checking fut stderr - public Boolean test_success_3(Mosaic_IO io){ - System.err.print("fut stderr"); // suppose the fut does this: - String peek_at_futs_output = io.get_err_content(); - Boolean flag0 = io.has_err_content(); - Boolean flag1 = peek_at_futs_output.equals("fut stderr"); - io.clear_buffers(); // otherwise extraneous chars will cause an fail - return flag0 && flag1; - } - - } - - public static void main(String[] args) { - MockClass_0 outer = new MockClass_0(); - TestSuite suite = outer.new TestSuite(); // Non-static instantiation - - /* for debug - Mosaic_IO io = new Mosaic_IO(); - io.redirect(); - suite.test_success_2(io); - */ - - int result = Mosaic_Testbench.run(suite); // Pass the suite instance to Mosaic_Testbench - System.exit(result); - } - -} diff --git "a/tester/javac\360\237\226\211/TestClasses_0.java" "b/tester/javac\360\237\226\211/TestClasses_0.java" deleted file mode 100644 index f1dde59..0000000 --- "a/tester/javac\360\237\226\211/TestClasses_0.java" +++ /dev/null @@ -1,59 +0,0 @@ -package tester; - -/* - These are used for testing that Mosaic can be used for white box - testing. Mosaic tests for Mosaic itself access each of these as - part of regression. Users are welcome to also check accessing these - when debugging any access problems that might arise. 
-*/ - -// Public class with public and private methods -public class TestClasses_0{ - public boolean a_public_method_1(){ - return true; - } - - private boolean a_private_method_2(){ - return true; - } - - public class APublicClass_01{ - public boolean a_public_method_3(){ - return true; - } - - private boolean a_private_method_4(){ - return true; - } - } - - private class APrivateClass_02{ - public boolean a_public_method_5(){ - return true; - } - - private boolean a_private_method_6(){ - return true; - } - } - - public static boolean a_public_static_method_7(){ - return true; - } - - private static boolean a_private_static_method_9(){ - return true; - } - -} - -// Default (package-private) class with public and private methods -class DefaultTestClass_01{ - public boolean a_public_method_7(){ - return true; - } - - private boolean a_private_method_8(){ - return true; - } -} diff --git "a/tester/javac\360\237\226\211/TestClasses_1.java" "b/tester/javac\360\237\226\211/TestClasses_1.java" deleted file mode 100644 index 7c425ce..0000000 --- "a/tester/javac\360\237\226\211/TestClasses_1.java" +++ /dev/null @@ -1,31 +0,0 @@ -package tester; - -/* - These are used for testing that Mosaic can be used for white box - testing. Mosaic tests for Mosaic itself access each of these as - part of regression. Users are welcome to also check accessing these - when debugging any access problems that might arise. -*/ - -// Public class with public and private methods -public class TestClasses_1 { - - private int i; - - public TestClasses_1(){ - i = 0; - } - - public TestClasses_1(int a){ - i = a; - } - - public TestClasses_1(int a ,int b){ - i = a + b; - } - - public int get_i() { - return i; - } - -} diff --git "a/tester/javac\360\237\226\211/TestClasses_2.java" "b/tester/javac\360\237\226\211/TestClasses_2.java" deleted file mode 100644 index 42a6e82..0000000 --- "a/tester/javac\360\237\226\211/TestClasses_2.java" +++ /dev/null @@ -1,43 +0,0 @@ -package tester; - -public class TestClasses_2{ - - // Static fields - public static int i_200; - private static String s_201; - - // Instance fields - public Integer i_202; - private Integer i_203; - - // Nested class - public static class Class_Nested_21{ - public static Integer i_210; - private static String s_211; - - public Integer i_212; - private Integer i_213; - - public static void initialize_static_fields(){ - i_210=210; - s_211="Static Nested Private String"; - } - - public void initialize_instance_fields(){ - i_212=212; - i_213=213; - } - } - - public static void initialize_static_fields(){ - i_200=200; - s_201="Static Private String"; - } - - public void initialize_instance_fields(){ - i_202=202; - i_203=203; - } -} - - diff --git "a/tester/javac\360\237\226\211/Testbench.java" "b/tester/javac\360\237\226\211/Testbench.java" deleted file mode 100644 index 070365e..0000000 --- "a/tester/javac\360\237\226\211/Testbench.java" +++ /dev/null @@ -1,82 +0,0 @@ -import java.lang.reflect.Method; -import com.ReasoningTechnology.Mosaic.Mosaic_IO; -import com.ReasoningTechnology.Mosaic.Mosaic_Testbench; - -public class Testbench { - - /* -------------------------------------------------------------------------------- - Test methods to validate Testbench functionality - Each method tests a specific aspect of the Testbench class, with a focus on - ensuring that well-formed and ill-formed test cases are correctly identified - and handled. 
- */ - - // Tests if a correctly formed method is recognized as well-formed by Testbench - public static Boolean test_method_is_wellformed_0(Mosaic_IO io) { - try { - Method validMethod = Testbench.class.getMethod("dummy_test_method", Mosaic_IO.class); - return Boolean.TRUE.equals(Mosaic_Testbench.method_is_wellformed(validMethod)); - } catch (NoSuchMethodException e) { - return false; - } - } - - // Tests if a method with an invalid return type is identified as malformed by Testbench - public static Boolean test_method_is_wellformed_1(Mosaic_IO io) { - System.out.println("Expected output: Structural problem message for dummy_invalid_return_method."); - try { - Method invalidReturnMethod = Testbench.class.getMethod("dummy_invalid_return_method", Mosaic_IO.class); - return Boolean.FALSE.equals(Mosaic_Testbench.method_is_wellformed(invalidReturnMethod)); - } catch (NoSuchMethodException e) { - return false; - } - } - - // Tests if a valid test method runs successfully with the Testbench - public static Boolean test_run_test_0(Mosaic_IO io) { - try { - Method validMethod = Testbench.class.getMethod("dummy_test_method", Mosaic_IO.class); - return Boolean.TRUE.equals(Mosaic_Testbench.run_test(new Testbench(), validMethod, io)); - } catch (NoSuchMethodException e) { - return false; - } - } - - /* Dummy methods for testing */ - public Boolean dummy_test_method(Mosaic_IO io) { - return true; // Simulates a passing test case - } - - public void dummy_invalid_return_method(Mosaic_IO io) { - // Simulates a test case with an invalid return type - } - - /* -------------------------------------------------------------------------------- - Manually run all tests and summarize results without using Testbench itself. - Each test's name is printed if it fails, and only pass/fail counts are summarized. - */ - public static int run() { - int passed_tests = 0; - int failed_tests = 0; - Mosaic_IO io = new Mosaic_IO(); - - if (test_method_is_wellformed_0(io)) passed_tests++; else { System.out.println("test_method_is_wellformed_0"); failed_tests++; } - if (test_method_is_wellformed_1(io)) passed_tests++; else { System.out.println("test_method_is_wellformed_1"); failed_tests++; } - if (test_run_test_0(io)) passed_tests++; else { System.out.println("test_run_test_0"); failed_tests++; } - - // Summary for all the tests - System.out.println("Testbench Total tests run: " + (passed_tests + failed_tests)); - System.out.println("Testbench Total tests passed: " + passed_tests); - System.out.println("Testbench Total tests failed: " + failed_tests); - - return (failed_tests > 0) ? 1 : 0; - } - - /* -------------------------------------------------------------------------------- - Main method for shell interface, sets the exit status based on test results - */ - public static void main(String[] args) { - int exitCode = run(); - System.exit(exitCode); - } -} diff --git "a/tester/javac\360\237\226\211/Util.java" "b/tester/javac\360\237\226\211/Util.java" deleted file mode 100644 index 78e30c9..0000000 --- "a/tester/javac\360\237\226\211/Util.java" +++ /dev/null @@ -1,82 +0,0 @@ -import com.ReasoningTechnology.Mosaic.Mosaic_Quantifier; - -/* -Util - -*/ - -public class Util{ - - public static Boolean test_all(){ - // Test with zero condition - Boolean[] condition0 = {}; - Boolean result = !Mosaic_Quantifier.all(condition0); // Empty condition list is false. 
- - // Test with one condition - Boolean[] condition1_true = {true}; - Boolean[] condition1_false = {false}; - result &= Mosaic_Quantifier.all(condition1_true); // should return true - result &= !Mosaic_Quantifier.all(condition1_false); // should return false - - // Test with two condition - Boolean[] condition2_true = {true, true}; - Boolean[] condition2_false1 = {true, false}; - Boolean[] condition2_false2 = {false, true}; - Boolean[] condition2_false3 = {false, false}; - result &= Mosaic_Quantifier.all(condition2_true); // should return true - result &= !Mosaic_Quantifier.all(condition2_false1); // should return false - result &= !Mosaic_Quantifier.all(condition2_false2); // should return false - result &= !Mosaic_Quantifier.all(condition2_false3); // should return false - - // Test with three condition - Boolean[] condition3_false1 = {true, true, false}; - Boolean[] condition3_true = {true, true, true}; - Boolean[] condition3_false2 = {true, false, true}; - Boolean[] condition3_false3 = {false, true, true}; - Boolean[] condition3_false4 = {false, false, false}; - result &= !Mosaic_Quantifier.all(condition3_false1); // should return false - result &= Mosaic_Quantifier.all(condition3_true); // should return true - result &= !Mosaic_Quantifier.all(condition3_false2); // should return false - result &= !Mosaic_Quantifier.all(condition3_false3); // should return false - result &= !Mosaic_Quantifier.all(condition3_false4); // should return false - - return result; - } - - public static Boolean test_all_set_false(){ - Boolean[] condition_list = {true, true, true}; - Mosaic_Quantifier.all_set_false(condition_list); - return !condition_list[0] && !condition_list[1] && !condition_list[2]; - } - - public static Boolean test_all_set_true(){ - Boolean[] condition_list = {false, false, false}; - Mosaic_Quantifier.all_set_true(condition_list); - return condition_list[0] && condition_list[1] && condition_list[2]; - } - - public static int run(){ - Boolean[] condition_list = new Boolean[3]; - condition_list[0] = test_all(); - condition_list[1] = test_all_set_false(); - condition_list[2] = test_all_set_true(); - - if( - !condition_list[0] - || !condition_list[1] - || !condition_list[2] - ){ - System.out.println("Util failed"); - return 1; - } - System.out.println("Util passed"); - return 0; - } - - // Main function to provide a shell interface for running tests - public static void main(String[] args){ - int return_code = run(); - System.exit(return_code); - return; - } -} diff --git "a/tester/javac\360\237\226\211/smoke.java" "b/tester/javac\360\237\226\211/smoke.java" deleted file mode 100644 index 567b8f6..0000000 --- "a/tester/javac\360\237\226\211/smoke.java" +++ /dev/null @@ -1,34 +0,0 @@ -import com.ReasoningTechnology.Mosaic.Mosaic_Quantifier; - -/* -Plug it in, see if there is smoke. There usually is. 
-
-*/
-
-public class smoke{
-
- public static Boolean test_is_true(){
-  return true;
- }
-
- public static int run(){
-  Boolean[] condition = new Boolean[1];
-  condition[0] = test_is_true();
-
-  int i = 0;
-  if( !Mosaic_Quantifier.all(condition) ){
-   System.out.println("Test0 failed");
-   return 1;
-  }
-  System.out.println("Test0 passed");
-  return 0;
- }
-
- // Main function to provide a shell interface for running tests
- public static void main(String[] args){
-  int return_code = run();
-  System.exit(return_code);
-  return;
- }
-
-}
diff --git a/tester/tool/clean b/tester/tool/clean
new file mode 100755
index 0000000..989b007
--- /dev/null
+++ b/tester/tool/clean
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+script_afp=$(realpath "${BASH_SOURCE[0]}")
+
+# input guards
+  env_must_be="tester/tool/env"
+  if [ "$ENV" != "$env_must_be" ]; then
+    echo "$(script_fp):: error: must be run in the $env_must_be environment"
+    exit 1
+  fi
+
+# remove files
+  set -x
+  cd "$REPO_HOME"/tester
+  rm_na log/log.txt
+  rm_na -r scratchpad/*
+  rm_na jvm/*
+  rm_na jdwp_server/*
+  set +x
+
+echo "$(script_fn) done."
diff --git a/tester/tool/env b/tester/tool/env
new file mode 100644
index 0000000..6128cf7
--- /dev/null
+++ b/tester/tool/env
@@ -0,0 +1,134 @@
+#!/usr/bin/env bash
+script_afp=$(realpath "${BASH_SOURCE[0]}")
+
+# input guards
+
+  env_must_be="tool_shared/bespoke/env"
+  error_bad_env=false
+  error_not_sourced=false
+  if [ "$ENV" != "$env_must_be" ]; then
+    echo "$(script_fp):: error: must be run in the $env_must_be environment"
+    error_bad_env=true
+  fi
+  if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
+    echo "$script_afp:: This script must be sourced, not executed."
+    error_not_sourced=true
+  fi
+  if $error_not_sourced; then exit 1; fi
+  if $error_bad_env; then return 1; fi
+
+#--------------------------------------------------------------------------------
+# arguments
+
+  if [[ -n "$1" ]]; then MODE=$1; else MODE=release; fi
+  export MODE
+  echo "MODE: $MODE"
+
+#--------------------------------------------------------------------------------
+# so we can do testing
+
+export PATH=\
+"$REPO_HOME"/tester/tool/\
+:"$JAVA_HOME"/bin\
+:"$PATH"
+
+# so we can run individual built test wrappers
+export PATH=\
+"$REPO_HOME"/tester/jvm\
+:"$PATH"
+
+#--------------------------------------------------------------------------------
+# class/source paths
+
+BASE_CLASSPATH="$JAVA_HOME"/lib:"$REPO_HOME"/tester/log
+BASE_SOURCEPATH="$REPO_HOME"/tester/javac
+
+case "$MODE" in
+
+# Classes, and other-than-tester sources if present, come from the release candidate. This is the normal MODE for regression testing.
+#
+# Release candidate sources, if present, are for viewing only. If sources are present in the release, but cannot be read directly from the jar file, expand the jar file onto the scratchpad and replace that include line with this one:
+#
+#  :$REPO_HOME/release/scratchpad\
+#
+  release)
+
+export CLASSPATH=\
+"$BASE_CLASSPATH\
+:$REPO_HOME/tester/scratchpad\
+:$REPO_HOME/release/${PROJECT}.jar\
+:$CLASSPATH"
+
+export SOURCEPATH=\
+"$BASE_SOURCEPATH\
+:$REPO_HOME/release/${PROJECT}.jar\
+:$SOURCEPATH"
+
+  ;;
+
+
+# Classes and other-than-tester sources come from developer/scratchpad. This is the normal MODE for the developer when debugging test failures.
+#
+# While in env_developer, the developer must make the project and gather third party
+# tools and sources into the scratchpad for this to work.
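+# (Editorial note) Example usage, assuming tool_shared/bespoke/env has already
+# been sourced to satisfy the guards above:
+#
+#   . tester/tool/env developer
+#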
+#
+  developer)
+
+export CLASSPATH=\
+"$BASE_CLASSPATH\
+:$REPO_HOME/tester/scratchpad\
+:$REPO_HOME/developer/scratchpad\
+:$CLASSPATH"
+
+export SOURCEPATH=\
+"$BASE_SOURCEPATH\
+:$REPO_HOME/developer/scratchpad\
+:$SOURCEPATH"
+
+  ;;
+
+# Classes and other-than-tester sources come from tester/scratchpad. This MODE gives the tester complete control over what to include in the test environment.
+#
+# Tester expands everything to be included into the test environment into the scratchpad.
+#
+# Any changes made in the scratchpad must be exported to the environment the files came from if they are to persist.
+#
+  local)
+
+export CLASSPATH=\
+"$BASE_CLASSPATH\
+:$REPO_HOME/tester/scratchpad\
+:$CLASSPATH"
+
+export SOURCEPATH=\
+"$BASE_SOURCEPATH\
+:$REPO_HOME/tester/scratchpad\
+:$SOURCEPATH"
+
+  ;;
+
+# default
+
+  *)
+    echo "Unknown MODE: $MODE"
+    return 1
+  ;;
+
+  esac
+
+echo CLASSPATH:
+vl echo "$CLASSPATH"
+echo SOURCEPATH:
+vl echo "$SOURCEPATH"
+echo PATH:
+vl echo "$PATH"
+
+#--------------------------------------------------------------------------------
+# misc
+
+  # make .githolder and .gitignore visible
+  alias ls="ls -a"
+  export PROMPT_DECOR="$PROJECT"_tester
+  export ENV=$(script_fp)
+  echo ENV "$ENV"
+  cd "$REPO_HOME"/tester/
diff --git a/tester/tool/list b/tester/tool/list
new file mode 100755
index 0000000..273b456
--- /dev/null
+++ b/tester/tool/list
@@ -0,0 +1,29 @@
+#!/bin/env bash
+script_afp=$(realpath "${BASH_SOURCE[0]}")
+
+# returns the list of tests, to be used by 'make' and by 'run'
+
+# input guards
+env_must_be="tester/tool/env"
+if [ "$ENV" != "$env_must_be" ]; then
+  echo "$(script_fp):: error: must be run in the $env_must_be environment"
+  exit 1
+fi
+
+# the list
+
+echo\
+ smoke\
+ Logger\
+ Util\
+ IO\
+ Testbench\
+ MockClass_0\
+ IsPrimitive\
+ Dispatcher_0\
+ Dispatcher_1\
+ Dispatcher_2\
+ Dispatcher_3\
+""
+
+# Dispatch_1\
diff --git a/tester/tool/make b/tester/tool/make
new file mode 100755
index 0000000..1d2f173
--- /dev/null
+++ b/tester/tool/make
@@ -0,0 +1,57 @@
+#!/bin/env bash
+set -x
+script_afp=$(realpath "${BASH_SOURCE[0]}")
+
+# input guards
+
+env_must_be="tester/tool/env"
+if [ "$ENV" != "$env_must_be" ]; then
+  echo "$(script_fp):: error: must be run in the $env_must_be environment"
+  exit 1
+fi
+
+echo "Compiling files..."
+
+set -x
+cd "$REPO_HOME"/tester
+
+# setup a couple of test classes
+
+javac -g -d scratchpad javac/TestClasses*
+
+
+# Get the list of tests to compile
+# the result is a space-separated list
+list=$(list)
+
+# make class files
+for file in $list; do
+  javac -g -d scratchpad "javac/$file.java"
+done
+set +x
+
+echo "Making jvm scripts ..."
+mkdir -p jvm
+for file in $list; do
+cat > jvm/$file << EOL
+#!/bin/env bash
+java $file
EOL
+chmod +x jvm/$file
+done
+
+echo "Making jdwp debug server scripts..."
+mkdir -p jdwp_server
+for file in $list; do
+cat > jdwp_server/$file << EOL
+#!/bin/env bash
+java -agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=*:5005 $file
+EOL
+chmod +x jdwp_server/$file
+done
+
+echo "$(script_fp) done."
+
+
+
+set +x
diff --git a/tester/tool/run b/tester/tool/run
new file mode 100755
index 0000000..a3bcf3a
--- /dev/null
+++ b/tester/tool/run
@@ -0,0 +1,25 @@
+#!/bin/env bash
+
+# Ensure REPO_HOME is set
+if [ -z "$REPO_HOME" ]; then
+  echo "Error: REPO_HOME is not set."
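+  # (Editorial note) REPO_HOME is exported when tool_shared/bespoke/env is
+  # sourced; reaching this branch usually means no project env was sourced.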
+  exit 1
+fi
+
+# Navigate to the jvm directory
+cd "$REPO_HOME/tester/jvm" || exit
+
+# Get the list of test scripts, in the specific order given by the 'list' tool
+list=$(list)
+echo list: $list
+
+# Execute each test in the specified order
+for file in $list; do
+  echo
+  if [[ -x "$file" && ! -d "$file" ]]; then
+    echo "... Running $file"
+    ./"$file"
+  else
+    echo "Skipping $file (not executable or is a directory)"
+  fi
+done
diff --git "a/tester/tool\360\237\226\211/clean" "b/tester/tool\360\237\226\211/clean"
deleted file mode 100755
index 989b007..0000000
--- "a/tester/tool\360\237\226\211/clean"
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-script_afp=$(realpath "${BASH_SOURCE[0]}")
-
-# input guards
-  env_must_be="tester/tool🖉/env"
-  if [ "$ENV" != "$env_must_be" ]; then
-    echo "$(script_fp):: error: must be run in the $env_must_be environment"
-    exit 1
-  fi
-
-# remove files
-  set -x
-  cd "$REPO_HOME"/tester
-  rm_na log/log.txt
-  rm_na -r scratchpad/*
-  rm_na jvm/*
-  rm_na jdwp_server/*
-  set +x
-
-echo "$(script_fn) done."
diff --git "a/tester/tool\360\237\226\211/env" "b/tester/tool\360\237\226\211/env"
deleted file mode 100644
index 6128cf7..0000000
--- "a/tester/tool\360\237\226\211/env"
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env bash
-script_afp=$(realpath "${BASH_SOURCE[0]}")
-
-# input guards
-
-  env_must_be="tool_shared/bespoke🖉/env"
-  error_bad_env=false
-  error_not_sourced=false
-  if [ "$ENV" != "$env_must_be" ]; then
-    echo "$(script_fp):: error: must be run in the $env_must_be environment"
-    error_bad_env=true
-  fi
-  if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
-    echo "$script_afp:: This script must be sourced, not executed."
-    error_not_sourced=true
-  fi
-  if $error_not_sourced; then exit 1; fi
-  if $error_bad_env; then return 1; fi
-
-#--------------------------------------------------------------------------------
-# arguments
-
-  if [[ -x "$1" ]]; then MODE=$1; else MODE=release; fi
-  export MODE
-  echo "MODE: $MODE"
-
-#--------------------------------------------------------------------------------
-# so we can do testing
-
-export PATH=\
-"$REPO_HOME"/tester/tool🖉/\
-:"$JAVA_HOME"/bin\
-:"$PATH"
-
-# so we can run individual built tests wrappers
-export PATH=\
-"$REPO_HOME"/tester/jvm\
-:"$PATH"
-
-#--------------------------------------------------------------------------------
-# class/source paths
-
-BASE_CLASSPATH="$JAVA_HOME"/lib:"$REPO_HOME"/tester/log
-BASE_SOURCEPATH="$REPO_HOME"/tester/javac🖉
-
-case "$MODE" in
-
-# Classes, and other-than-tester sources if present, come from the release candidate. This is the normal MODE for regression testing.
-#
-# Release candidate sources, if present, are for viewing only. If sources are present in the release, but can not be read directly from the jar file, expand the jar file onto the scratchpad and replace that include line with this one:
-#
-#  :$REPO_HOME/release/scratchpad\
-#
-  release)
-
-export CLASSPATH=\
-"$BASE_CLASSPATH\
-:$REPO_HOME/tester/scratchpad\
-:$REPO_HOME/release/${PROJECT}.jar\
-:$CLASSPATH"
-
-export SOURCEPATH=\
-"$BASE_SOURCEPATH\
-:$REPO_HOME/release/${PROJECT}.jar\
-:$SOURCEPATH"
-
-  ;;
-
-
-# Classes and other-than-tester sources come from developer/scratchpad. This is the normal MODE for the developer when debugging test failures.
-#
-# While in env_developer, the developer must make the project and gather third party
-# tools and sources into the scratchpad for this to work.
-# - developer) - -export CLASSPATH=\ -"$BASE_CLASSPATH\ -:$REPO_HOME/tester/scratchpad\ -:$REPO_HOME/developer/scratchpad\ -:$CLASSPATH" - -export SOURCEPATH=\ -"$BASE_SOURCEPATH\ -:$REPO_HOME/developer/scratchpad\ -:$SOURCEPATH" - - ;; - -# Classes and other-than-tester sources come from tester/scratchpad. This MODE gives the tester complete control over what to include in the test environment. -# -# Tester expands everything to be included into the test environment into the scratchpad. -# -# Any changes made to must be exported to the environment the files came from if they are to persist. -# - local) - -export CLASSPATH=\ -"$BASE_CLASSPATH -:$REPO_HOME/tester/scratchpad\ -:$CLASSPATH" - -export SOURCEPATH=\ -"$BASE_SOURCEPATH\ -:$REPO_HOME/tester/scratchpad\ -:$SOURCEPATH" - - ;; - -# default - - *) - echo "Unknown MODE: $MODE" - return 1 - ;; - - esac - -echo CLASSPATH: -vl echo "$CLASSPATH" -echo SOURCEPATH: -vl echo "$SOURCEPATH" -echo PATH: -vl echo $PATH - -#-------------------------------------------------------------------------------- -# misc - - # make .githolder and .gitignore visible - alias ls="ls -a" - export PROMPT_DECOR="$PROJECT"_tester - export ENV=$(script_fp) - echo ENV "$ENV" - cd "$REPO_HOME"/tester/ diff --git "a/tester/tool\360\237\226\211/list" "b/tester/tool\360\237\226\211/list" deleted file mode 100755 index 273b456..0000000 --- "a/tester/tool\360\237\226\211/list" +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# returns list of tests to be used by 'make' and for 'run' - -# input guards -env_must_be="tester/tool🖉/env" -if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 -fi - -# the list - -echo\ - smoke\ - Logger\ - Util\ - IO\ - Testbench\ - MockClass_0\ - IsPrimitive\ - Dispatcher_0\ - Dispatcher_1\ - Dispatcher_2\ - Dispatcher_3\ -"" - -# Dispatch_1\ diff --git "a/tester/tool\360\237\226\211/make" "b/tester/tool\360\237\226\211/make" deleted file mode 100755 index 1d2f173..0000000 --- "a/tester/tool\360\237\226\211/make" +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/env bash -set -x -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# input guards - -env_must_be="tester/tool🖉/env" -if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - exit 1 -fi - -echo "Compiling files..." - -set -x -cd $REPO_HOME/tester - -# setup a couple of test classes - -javac -g -d scratchpad javac🖉/TestClasses* - - -# Get the list of tests to compile -# wrapper is a space-separated list -list=$(list) - -# make class files -for file in $list; do - javac -g -d scratchpad "javac🖉/$file.java" -done -set +x - -echo "Making jvm scripts ..." -mkdir -p jvm -for file in $list; do - cat > jvm/$file << EOL -#!/bin/env bash -java $file -EOL - chmod +x jvm/$file - done - -echo "Making jdwp debug server scripts..." -mkdir -p jdwp_server -for file in $list; do - cat > jdwp_server/$file << EOL -#!/bin/env bash -java -agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=*:5005 $file -EOL - chmod +x jdwp_server/$file - done - -echo "$(script_fp) done." - - - -set +x diff --git "a/tester/tool\360\237\226\211/run" "b/tester/tool\360\237\226\211/run" deleted file mode 100755 index a3bcf3a..0000000 --- "a/tester/tool\360\237\226\211/run" +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/env bash - -# Ensure REPO_HOME is set -if [ -z "$REPO_HOME" ]; then - echo "Error: REPO_HOME is not set." 
-  exit 1
-fi
-
-# Navigate to the bash directory
-cd "$REPO_HOME/tester/jvm" || exit
-
-# Get the list of test scripts in the specific order from bash_wrapper_list
-list=$(list)
-echo list: $list
-
-# Execute each test in the specified order
-for file in $list; do
-  echo
-  if [[ -x "$file" && ! -d "$file" ]]; then
-    echo "... Running $file"
-    ./"$file"
-  else
-    echo "Skipping $file (not executable or is a directory)"
-  fi
-done
diff --git a/tool/.githolder b/tool/.githolder
new file mode 100644
index 0000000..e69de29
diff --git a/tool/env b/tool/env
new file mode 100644
index 0000000..d3541e6
--- /dev/null
+++ b/tool/env
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+script_afp=$(realpath "${BASH_SOURCE[0]}")
+
+# input guards
+
+  env_must_be="tool_shared/bespoke/env"
+  error_bad_env=false
+  error_not_sourced=false
+  if [ "$ENV" != "$env_must_be" ]; then
+    echo "$(script_fp):: error: must be run in the $env_must_be environment"
+    error_bad_env=true
+  fi
+  if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
+    echo "$script_afp:: This script must be sourced, not executed."
+    error_not_sourced=true
+  fi
+  if $error_not_sourced; then exit 1; fi
+  if $error_bad_env; then return 1; fi
+
+export PATH=\
+"$REPO_HOME"/tool_shared/bespoke/\
+:"$PATH"
+
+# expose sneaky hidden files
+alias ls="ls -a"
+
+# some feedback to show all went well
+
+  export PROMPT_DECOR="$PROJECT"_administrator
+  export ENV=$(script_fp)
+  echo ENV "$ENV"
+
+
+
+
diff --git a/tool_shared/bespoke/env b/tool_shared/bespoke/env
new file mode 100644
index 0000000..76dbada
--- /dev/null
+++ b/tool_shared/bespoke/env
@@ -0,0 +1,81 @@
+#!/usr/bin/env bash
+script_afp=$(realpath "${BASH_SOURCE[0]}")
+if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
+  echo "$script_afp:: This script must be sourced, not executed."
+  exit 1
+fi
+
+# --------------------------------------------------------------------------------
+# project definition
+
+# actual absolute directory path for this script file
+
+  script_adp(){
+    dirname "$script_afp"
+  }
+
+# assume this script is located $REPO_HOME/tool_shared/bespoke and work backwards
+# to get $REPO_HOME, etc.
+
+  REPO_HOME=$(dirname "$(dirname "$(script_adp)")")
+  echo REPO_HOME "$REPO_HOME"
+
+  PROJECT=$(basename "$REPO_HOME")
+  echo PROJECT "$PROJECT"
+
+  # set the prompt decoration to the name of the project
+  PROMPT_DECOR=$PROJECT
+
+  # include the project bespoke tools
+  PATH="$REPO_HOME"/tool_shared/bespoke:"$PATH"
+
+  export REPO_HOME PROJECT PROMPT_DECOR PATH
+
+# --------------------------------------------------------------------------------
+# The project administrator sets up the following tools for all roles to use:
+#
+  PATH="$REPO_HOME/tool_shared/third_party/RT-project-share/release/bash:$PATH"
+  PATH="$REPO_HOME/tool_shared/third_party/RT-project-share/release/amd64:$PATH"
+  PATH="$REPO_HOME/tool_shared/third_party/emacs/bin:$PATH"
+
+  # after having installed IntelliJ IDEA
+  PATH="$REPO_HOME/tool_shared/third_party/idea-IC-243.21565.193/bin:$PATH"
+
+  JAVA_HOME="$REPO_HOME/tool_shared/third_party/jdk-23.0.1"
+
+  # three packages merely to do logging!
+  LOGGER_FACADE="$REPO_HOME"/tool_shared/third_party/slf4j-api-2.0.9.jar
+  LOGGER_CLASSIC="$REPO_HOME"/tool_shared/third_party/logback-classic-1.4.11.jar
+  LOGGER_CORE="$REPO_HOME"/tool_shared/third_party/logback-core-1.4.11.jar
+
+  export PATH JAVA_HOME LOGGER_FACADE LOGGER_CLASSIC LOGGER_CORE
+
+# --------------------------------------------------------------------------------
+# the following functions are provided for other scripts to use.
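+# (Editorial note) they are exported with `export -f` below, which is what
+# makes them callable from the separately-executed tool scripts as well.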
+  ## script's filename
+  script_fn(){
+    basename "$script_afp"
+  }
+
+  ## script's filepath relative to $REPO_HOME
+  script_fp(){
+    realpath --relative-to="${REPO_HOME}" "$script_afp"
+  }
+
+  ## script's dirpath relative to $REPO_HOME
+  script_dp(){
+    dirname "$(script_fp)"
+  }
+
+  export -f script_adp script_fn script_dp script_fp
+
+# --------------------------------------------------------------------------------
+# closing
+
+  export ENV=$(script_fp)
+  echo ENV "$ENV"
+
diff --git a/tool_shared/bespoke/version b/tool_shared/bespoke/version
new file mode 100755
index 0000000..d7fb222
--- /dev/null
+++ b/tool_shared/bespoke/version
@@ -0,0 +1,7 @@
+#!/bin/env bash
+script_afp=$(realpath "${BASH_SOURCE[0]}")
+
+# 2024-10-24T14:56:09Z project skeleton and test bench files extracted from Ariadne
+# 2024-11-08T07:18:03Z prefix `Mosaic_` to class names. See document/class_name.txt.
+echo v1.1
+
diff --git "a/tool_shared/bespoke\360\237\226\211/env" "b/tool_shared/bespoke\360\237\226\211/env"
deleted file mode 100644
index 76dbada..0000000
--- "a/tool_shared/bespoke\360\237\226\211/env"
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env bash
-script_afp=$(realpath "${BASH_SOURCE[0]}")
-if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
-  echo "$script_afp:: This script must be sourced, not executed."
-  exit 1
-fi
-
-# --------------------------------------------------------------------------------
-# project definition
-
-# actual absolute directory path for this script file
-
-  script_adp(){
-    dirname "$script_afp"
-  }
-
-# assume this script is located $REPO_HOME/tools_shared/bespoke and work backwards
-# to get $REPO_HOME, etc.
-
-  REPO_HOME=$(dirname "$(dirname "$(script_adp)")")
-  echo REPO_HOME "$REPO_HOME"
-
-  PROJECT=$(basename "$REPO_HOME")
-  echo PROJECT "$PROJECT"
-
-  # set the prompt decoration to the name of the project
-  PROMPT_DECOR=$PROJECT
-
-  # include the project bespoke tools
-  PATH="$REPO_HOME"/tool_shared/bespoke🖉:"$PATH"
-
-  export REPO_HOME PROJECT PROMPT_DECOR PATH
-
-# --------------------------------------------------------------------------------
-# The project administrator sets up the following tools for all roles to use:
-#
-  PATH="$REPO_HOME/tool_shared/third_party/RT-project-share/release/bash:$PATH"
-  PATH="$REPO_HOME/tool_shared/third_party/RT-project-share/release/amd64:$PATH"
-  PATH="$REPO_HOME/tool_shared/third_party/emacs/bin:$PATH"
-
-  # after having installed Itellij IDEA
-  PATH="$REPO_HOME/tool_shared/third_party/idea-IC-243.21565.193/bin:$PATH"
-
-  JAVA_HOME="$REPO_HOME/tool_shared/third_party/jdk-23.0.1"
-
-  # three packages merely to do logging!
-  LOGGER_FACADE="$REPO_HOME"/tool_shared/third_party/slf4j-api-2.0.9.jar
-  LOGGER_CLASSIC="$REPO_HOME"/tool_shared/third_party/logback-classic-1.4.11.jar
-  LOGGER_CORE="$REPO_HOME"/tool_shared/third_party/logback-core-1.4.11.jar
-
-  export PATH JAVA_HOME LOGGER_FACADE LOGGER_CLASSIC LOGGER_CORE
-
-# --------------------------------------------------------------------------------
-# the following functions are provided for other scripts to use.
-# at the top of files that make use of these functions put the following line:
-#     script_afp=$(realpath "${BASH_SOURCE[0]}")
-#
-
-  ## script's filename
-  script_fn(){
-    basename "$script_afp"
-  }
-
-  ## script's dirpath relative to $REPO_HOME
-  script_fp(){
-    realpath --relative-to="${REPO_HOME}" "$script_afp"
-  }
-
-  ## script's dirpath relative to $REPO_HOME
-  script_dp(){
-    dirname "$(script_fp)"
-  }
-
-  export -f script_adp script_fn script_dp script_fp
-
-# --------------------------------------------------------------------------------
-# closing
-
-  export ENV=$(script_fp)
-  echo ENV "$ENV"
-
diff --git "a/tool_shared/bespoke\360\237\226\211/version" "b/tool_shared/bespoke\360\237\226\211/version"
deleted file mode 100755
index d7fb222..0000000
--- "a/tool_shared/bespoke\360\237\226\211/version"
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/env bash
-script_afp=$(realpath "${BASH_SOURCE[0]}")
-
-# 2024-10-24T14:56:09Z project skeleton and test bench files extracted from Ariadne
-# 2024-11-08T07:18:03Z prefix `Mosaic_` to class names. See document/class_name.txt.
-echo v1.1
-
diff --git a/tool_shared/document/install.txt b/tool_shared/document/install.txt
new file mode 100644
index 0000000..1407032
--- /dev/null
+++ b/tool_shared/document/install.txt
@@ -0,0 +1,47 @@
+
+----------------------------------------
+env_administrator
+
+For mucking around with the tools install and config, cd to the top of
+the project and source the env_administrator environment.
+
+  git clone
+  cd project
+  source env_administrator
+
+----------------------------------------
+General notes on third party tools
+
+A project will have paths and/or symbolic links pointing into the third party
+tools, so as to make use of them.
+
+The contents of the third_party directory are .gitignored, though the upstream
+directory has its own .gitignore.
+
+If you already have the project installed, perhaps because you are working on
+it, then a new install is not needed; instead the existing install can be
+linked, for example for RT-project-share:
+
+  ln -snf ~/RT-project-share "$REPO_HOME"/tool_shared/third_party
+
+Otherwise, follow the directions below to make a local
+install of the third party tool.
+
+----------------------------------------
+Logging
+
+curl -O https://repo1.maven.org/maven2/org/slf4j/slf4j-api/2.0.9/slf4j-api-2.0.9.jar
+
+curl -O https://repo1.maven.org/maven2/ch/qos/logback/logback-classic/1.4.11/logback-classic-1.4.11.jar
+curl -O https://repo1.maven.org/maven2/ch/qos/logback/logback-core/1.4.11/logback-core-1.4.11.jar
+
+#curl -O https://repo1.maven.org/maven2/ch/qos/logback/logback-classic/1.5.12/logback-classic-1.5.12.jar
+#curl -O https://repo1.maven.org/maven2/ch/qos/logback/logback-core/1.5.12/logback-core-1.5.12.jar
+
+add names for these to bespoke/env, for use in CLASSPATH.
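+
+For example (a sketch; the LOGGER_* names and jar paths below are the ones
+already defined in tool_shared/bespoke/env):
+
+  LOGGER_FACADE="$REPO_HOME"/tool_shared/third_party/slf4j-api-2.0.9.jar
+  LOGGER_CLASSIC="$REPO_HOME"/tool_shared/third_party/logback-classic-1.4.11.jar
+  LOGGER_CORE="$REPO_HOME"/tool_shared/third_party/logback-core-1.4.11.jar
+  export CLASSPATH="$LOGGER_FACADE:$LOGGER_CLASSIC:$LOGGER_CORE:$CLASSPATH"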
+
+----------------------------------------
+see ~/RT-project-share/document🖉 for:
+
+  jdk-23; and one or more IDEs: IntelliJ IDEA, Eclipse, Emacs
+
diff --git a/tool_shared/document/install_Eclipse_hints.txt b/tool_shared/document/install_Eclipse_hints.txt
new file mode 100644
index 0000000..167d14e
--- /dev/null
+++ b/tool_shared/document/install_Eclipse_hints.txt
@@ -0,0 +1,22 @@
+
+The project is originally configured to be used with Emacs as an IDE. The tools
+can all be run from a shell inside of emacs. Even when using an IDE, you should
+understand what the shell environment scripts and tools do.
+
+I have added a working IntelliJ IDEA configuration, so if you want a modern IDE
+it is probably best to go with this. See install_IntelliJ_IDEA.txt in this
+directory.
+
+I've not run Eclipse on the project; if you do, perhaps you can update the
+notes here. These things will probably increase your odds of making it work:
+  1. open a shell
+  2. cd to Ariadne, and source the env_developer
+  3. run the tool 'distribute_source'
+  4. run eclipse from the command line
+  5. give eclipse the 'scratchpad' directory as its source
+
+Be sure to run `release` after development to update what the tester sees.
+
+Do the analogous steps if you contribute as a 'tester'. I.e. from
+the shell source env_tester instead. Also, you will need to add
+distribute_source to tester/tool, as it is currently not there.
+
diff --git a/tool_shared/document/install_IntelliJ_IDEA.txt b/tool_shared/document/install_IntelliJ_IDEA.txt
new file mode 100644
index 0000000..82b21cc
--- /dev/null
+++ b/tool_shared/document/install_IntelliJ_IDEA.txt
@@ -0,0 +1,252 @@
+
+This file describes the local install and configuration of IntelliJ IDEA for
+the Ariadne project.
+
+The project was/is originally configured to be used with Emacs as an IDE. The
+tools can all be run from a shell inside of emacs. Even when using an IDE, you
+should understand what the shell environment scripts and tools do.
+
+--------------------------------------------------------------------------------
+Some notes
+
+'project directory' - the directory with the .git file in it. Called $REPO_HOME in
+  RT scripts. Called $PROJECT_DIR$ (doesn't seem to be reliable) in IntelliJ
+  file paths.
+
+'module directory' - for RT projects examples include `~/Ariadne/developer` and
+  `~/Ariadne/tester`. These are independent build environments.
+
+  Careful, if IntelliJ scans directories it will not hesitate to pull things
+  from `tool_shared/third_party` or wherever else it finds things, and it will
+  make a big mess.
+
+IntelliJ paths on forms:
+
+  I tried using $PROJECT_DIR$ as a variable standing for the project directory,
+  as this was suggested by an AI. However IntelliJ simply made a directory
+  with the literal variable name.
+
+  Also tried using $REPO_HOME, as that was defined in the environment IntelliJ
+  was run from. It had the same effect as $PROJECT_DIR$.
+
+  It will work with `~` for the home directory. So I have been using
+  `~/Ariadne/...` when typing out paths.
+
+  There will be a browser icon at the right of form entry boxes that take
+  paths. The browser tool starts from either /home or at / rather than at the
+  project. It inserts absolute path names.
+
+A GUI bug:
+
+  There is a Gnome Linux bug where the drop down menu can stay on top no matter
+  what other window, application, or what virtual desktop a person is on. You
+  must go back to the IDEA application window and hit <esc> to make it go
+  away.
+
+The [OK] button at the bottom of dialogs:
+
+  This closes the dialog.
+
+  To apply changes hit [Apply].
+
+  [OK] will not save what is on the dialog if [Apply] would fail, but
+  it still closes it.
+
+--------------------------------------------------------------------------------
+To install IntelliJ
+
+  Download the tar file from
+  `https://www.jetbrains.com/idea/download/?section=linux`
+  into the
+  `$REPO_HOME/tool_shared/third_party/upstream`
+  directory.
+
+  Expand it into
+  `$REPO_HOME/tool_shared/third_party`
+
+  cd into the expanded directory, into `bin`, then `chmod u+x` and run `idea_inst`.
+
+  Set the env path to include
+  `$REPO_HOME/tool_shared/third_party/idea-IC*/bin`
+
+  The executable is called `idea`.
+
+  Consider setting a desktop shortcut. Consider instead installing it in your
+  own bin directory. Easily done, just move the directory created by the tar
+  file expansion there.
+
+  I prefer a user mode install, as there is no reason this tool should need
+  admin privileges.
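+
+  As a concrete sketch (the tarball glob below is an assumption; substitute
+  whatever file you actually downloaded, and the idea-IC directory name that
+  it expands to):
+
+    cd "$REPO_HOME"/tool_shared/third_party
+    tar -xzf upstream/ideaIC-*.tar.gz
+    PATH="$REPO_HOME/tool_shared/third_party/idea-IC-243.21565.193/bin:$PATH"
+    idea &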
+
+--------------------------------------------------------------------------------
+Startup
+
+  ./tool_shared/third_party/idea-IC-243.21565.193/bin/idea &
+
+  Shows: Welcome screen
+    select "Open" as Ariadne already exists
+
+  Shows: Open File or Project Browser
+    In top dialog box put full path to project directory.
+
+    Hit [OK] at the bottom. Unlikely, but might be scrolled off the bottom of the screen.
+
+  Shows: main window
+    Appears after hitting OK from the "Open File or Project" [ok].
+
+    Has a tool bar at the top. There is a double meat hamburger menu icon
+    at the left. Hitting this will replace the top bar with a vertical
+    menu for drop down menus.
+
+    Careful, after the hamburger icon is pressed, the first drop down
+    menu instantly appears. Slide over to get the other drop downs.
+    Don't click, slide!
+
+    Under tool bar:
+      Far left is an icon bar. Then a file browser. And then a big box
+      describing hot keys.
+
+--------------------------------------------------------------------------------
+Configuration
+
+If you cloned the Ariadne project, the modules will already be configured, and
+also probably some of the run configuration will already be configured.
+
+  -------------
+  Setup Project
+    Hamburger icon > File drop-down > Project Structure > Project
+
+    select project SDK from disk:
+      ~/Ariadne/tool_shared/third_party/jdk-11
+
+  -------------
+  Setup Modules
+
+    Hamburger icon > File drop-down > Project Structure > Modules
+
+    Shows: "Project Structure" dialog
+
+    Hit the '+' option that shows at the top of the second panel.
+
+    New Module.
+
+    Dialog pop-up
+
+      Name: developer
+
+      Location: (browse to the developer directory)
+
+        alternatively enter the full path, ~/Ariadne, e.g.
+
+        $PROJECT_DIR$ instead of ~/Ariadne worked when
+        entering the first module, but not the second.
+
+      Dependencies:
+        Select the "Project SDK" from the drop down.
+
+      Careful, the module won't be made until hitting [Create] at the bottom.
+
+      As far as I can tell you can't get this panel again; rather, delete and
+      add a new module if you need to change the entries.
+
+    Shows: "Project Structure" dialog, again, now with the third panel holding
+    information about the developer module.
+      Third panel shows three choices: [Sources] [Paths] [Dependencies]
+
+      [Sources] is already selected.
+
+      With Sources there are two panels.
+
+        In the second panel, on the right side, the module root should show at
+        the top. Under it, if it lists any sources, use the button at the far
+        right of the listing to x it out.
+
+        The first panel now shows a file browser for the module.
+
+        Select the `javac` directory with a single click. Then, and only
+        after, look immediately above the directory listing and click on
+        [Sources].
+
+        "Source Folders" will now appear in the second panel. The
+        javac folder will be listed.
+
+        hit: [apply] at the bottom (or the form will reset to defaults next time)
+
+
+      Slide over to [Paths]
+        Compiler Output
+          select [Use Module Compile Output Path]
+          Output Path: $PROJECT_DIR$/developer/scratchpad
+          Test Path: $PROJECT_DIR$/developer/test
+
+          leave the exclude output checkbox; that means to exclude from the repo
+          and from indexing for search
+
+        hit: [apply] at the bottom
+
+  -------------
+  To add an external tool, for example tester/tool/make:
+
+    This is how we integrate the local tools.
+
+    Note, even if a shell script runs a java program that was compiled with
+    debug flags and run in debug mode, that program can't be debugged this
+    way. It won't stop at break points, etc. For that an 'application' must
+    be added; see the next section.
+
+    Hamburger > Run > edit configurations
+      Shows Run/Debug configurations dialog
+        Upper left hit '+'
+        Shows drop down
+          choose [Shell Script] second from bottom
+        Shows dialog, for example:
+          Name: tester make
+          Script Path: ~/Ariadne/tester/tool/make (better to choose with the browser tool)
+          Script Options: tester make
+          Working Directory: ~/Ariadne (location of the env source scripts that env_run uses)
+          Environment variables: (none, env_run will source env_tester)
+          Interpreter: /bin/bash (left to default)
+
+  -------------
+  To add a program for debugging.
+
+    Hamburger > Run > edit configurations
+      Shows Run/Debug configurations dialog
+        Upper left hit '+'
+        Shows drop down
+          choose [Application] first choice
+        Shows dialog, for example:
+          Name: Test_Graph_0
+
+          next line are two boxes, they are not labeled, the defaults show:
+            [ module not specified ] [ -cp no module ]
+          I selected:
+            [ java 11 SDK of 'tester' module ] [ -cp tester ]
+          This can be confusing, as the modules are 'tester' and 'developer', but
+          here it asks for an SDK! Then the next box says it wants a class path,
+          but it wants a module name!
+
+          next line one box, not labeled
+            [ main class [] ]
+          Note the icon at right; it will give a list of class names, here in
+          the tester module, that have main calls. Select one.
+
+          next line, again not labeled
+            [ Program Arguments ]
+          Test_Graph_0 has no arguments so I left it blank.
+
+          Working Directory: ~/Ariadne
+
+          Environment Variables:
+            Left blank because the executable itself does not make use of any.
+            I do not know at this point whether variables set in the
+            environment IDEA ran in are inherited.
+
+          'Modify Options' with a drop down menu. (At the top right of the configuration dialog)
+            Scan down for the `Java` section.
+            Check: 'Do not build before run'
+            (To build this example, go to the Run menu and run `tester make`. Or run make directly
+            from a console prompt. Be sure to source env_tester first.)
+
+    Next go to main window file browser, click on the file you want to debug, click on the line
+    to set a break point. Right click to get a menu, and
+
diff --git a/tool_shared/document/install_emacs.txt b/tool_shared/document/install_emacs.txt
new file mode 100644
index 0000000..e5e0527
--- /dev/null
+++ b/tool_shared/document/install_emacs.txt
@@ -0,0 +1,32 @@
+
+System requirements:
+
+dnf install libX11-devel libXpm-devel libjpeg-devel libpng-devel libtiff-devel
+dnf install gtk3-devel giflib-devel gnutls-devel
+dnf install ncurses-devel texinfo
+dnf install libacl-devel libattr-devel libgccjit libgccjit-devel
+
+I gather this warning is unavoidable?
+"configure: WARNING: Your version of Gtk+ will have problems with" + +# install and build script: + +cd "$REPO_HOME"/tool_shared/third_party +mkdir -p emacs/{src,build,bin} + +# We sought stability, and now this. What can I say? It has 'visual-wrap-prefix-mode'. +pushd upstream +curl -L -O https://alpha.gnu.org/gnu/emacs/pretest/emacs-30.0.92.tar.xz +popd + +tar -xf upstream/emacs-30.0.92.tar.xz -C emacs/src --strip-components=1 + +pushd emacs/src +./configure --prefix="$REPO_HOME"/tool_shared/third_party/emacs +make -j$(nproc) +make install +make clean +popd + +rm -r emacs/{src,build} + diff --git "a/tool_shared/document\360\237\226\211/install.txt" "b/tool_shared/document\360\237\226\211/install.txt" deleted file mode 100644 index 1407032..0000000 --- "a/tool_shared/document\360\237\226\211/install.txt" +++ /dev/null @@ -1,47 +0,0 @@ - ----------------------------------------- -env_administrator - -For mucking around with the tools install and config, cd to the top of -the project and source the env_administrator environment. - - git clone - cd project - source env_administrator - ----------------------------------------- -General notes on third party tools - -A project will have paths and/or symbolic links pointing into the third party -tools, so as to make use of them. - -The contents of the third_party directory is .gititnored, though the upstream -directory has its own gitignore. - -If you already have the project installed, perhaps because you -are working on it, then a new install is not needed, rather the -existing install can be linked, for example for RT-project-share: - - ln -snf ~/RT-project-share "$REPO_HOME"/tool_shared/third_party - -Otherwise, follow the directions below to make a local -install of the third party tool. - ----------------------------------------- -Logging - -curl -O https://repo1.maven.org/maven2/org/slf4j/slf4j-api/2.0.9/slf4j-api-2.0.9.jar - -curl -O https://repo1.maven.org/maven2/ch/qos/logback/logback-classic/1.4.11/logback-classic-1.4.11.jar -curl -O https://repo1.maven.org/maven2/ch/qos/logback/logback-core/1.4.11/logback-core-1.4.11.jar - -#curl -O https://repo1.maven.org/maven2/ch/qos/logback/logback-classic/1.5.12/logback-classic-1.5.12.jar -#curl -O https://repo1.maven.org/maven2/ch/qos/logback/logback-classic/1.5.12/logback-core-1.5.12.jar - -add to bespoke🖉/env names for these for use in CLASSPATH - ----------------------------------------- -see ~/RT-project-share/document🖉 for: - - jdk-23; and one or more IDEs: IntelliJ IDEA, Eclipse, Emacs - diff --git "a/tool_shared/document\360\237\226\211/install_Eclipse_hints.txt" "b/tool_shared/document\360\237\226\211/install_Eclipse_hints.txt" deleted file mode 100644 index 167d14e..0000000 --- "a/tool_shared/document\360\237\226\211/install_Eclipse_hints.txt" +++ /dev/null @@ -1,22 +0,0 @@ - -The project is originally configured to be used with Emacs as an IDE. The tools -can all be run from a shell inside of emacs. Even when using an IDE what the -shell environment scripts and tools do should be understood. - -I have added a working IntelliJ IDEA configuration, so if you want a modern IDE -it is probably best to go with this. See ItelliJ_IDEA.txt in this directory. - -I've not run Eclipse on the project, if you do, perhaps you can update the notes -here. These things will probably increase your odds of making it work: - 1. open a shell - 2. cd to Ariadne, and source the env_developer - 3. run the tool 'distribute_source' - 3. run eclipse from the command line - 4. 
give eclipse the 'scratchpad' directory as its source - -Be sure to run `release` after development to update what the tester sees. - -Do the analogous steps if you contribute as a 'tester'. I.e. from -the shell source env_tester instead. Also, you will need to add -distribute_source to tester/tool, as it is currently not there. - diff --git "a/tool_shared/document\360\237\226\211/install_IntelliJ_IDEA.txt" "b/tool_shared/document\360\237\226\211/install_IntelliJ_IDEA.txt" deleted file mode 100644 index 82b21cc..0000000 --- "a/tool_shared/document\360\237\226\211/install_IntelliJ_IDEA.txt" +++ /dev/null @@ -1,252 +0,0 @@ - -This file describes the local install and configuration of IntelliJ_IDEA for -the Ariadne project. - -The project was/is originally configured to be used with Emacs as an IDE. The tools -can all be run from a shell inside of emacs. Even when using an IDE what the -shell environment scripts and tools do should be understood. - --------------------------------------------------------------------------------- -Some notes - -'project directory' - the directory with the .git file in it. Called $REPO_HOME in - RT scripts. Called $PROJECT_DIR$ (doesn't seem to be reliable) in IntelliJ - file paths. - -'module directory' - for RT projects examples include `~/Ariadne/developer' - `~/Ariadne/tester`. These are independent build environments. - - Careful, if Intellij scans directories it will not hesitate to pull things - from `tool_shared`/third_party or wherever else it finds things, and it will - make a big mess. - -IntelliJ paths on forms: - - I tried using $PROJECT_DIR$ as a variable standing for the project directory, - as this was suggested by an AI. However IntelliJ simply made a directory - with the literal variable name. - - Also tried using $REPO_HOME, as that was defined in the environment IntelliJ was run from. - It had the same effect as $PROJECT_DIR$. - - It will work with `~` for the home directory. So I have been using - `~/Ariadne/...` when typing out paths. - - There will be a browser icon at the right of a form entry boxes that take - paths. The browser tool starts from either /home or at / rather than at the - project. It inserts absolute path names. - -A GUI bug: - - There is a Gnome Linux bug where the drop down menu can stay on top no matter - what other window, application, or what virtual desktop a person is on. You - must go back to the IDEA application window and hit to make it go - away. - -The [OK] button at the bottom of dialogs: - - This closes the dialog. - - To apply changes hit [Apply]. - - [OK] will not save what is on the dialog if [Apply] would fail, but - it still closes it. - --------------------------------------------------------------------------------- -To install ItelliJ - - Download the tar file from - `https://www.jetbrains.com/idea/download/?section=linux` - into the - `$REPO_HOME/tool_shared/third_party/upstream` - directory. - - Expand it into - `$REPO_HOME/tool_shared/third_party` - - cd into the expanded directory, into `bin`, then `chmod u+x` and run `idea_inst`. - - set the env path to include - `$REPO_HOME/tool_shared/third_party/idea-IC*/bin` - - The executable is called `idea`. - - Consider setting a desktop short cut. Consider instead installing it in your - own bin directory. Easily done, just move the directory created by the tar - file expansion there. - - I prefer a user mode install, as there is no reason this tool should need - admin privileges. 
- --------------------------------------------------------------------------------- -Startup - - ./tool_shared/third_party/idea-IC-243.21565.193/bin/idea & - - Shows: Welcome screen - select "Open" as Ariadne already exists - - Shows: Open File or Project Browser - In top dialog box put full path to project directory. - - Hit [OK] at the bottom. Unlikely, but might be scrolled off the bottom of the screen. - - Shows: main window - Appears after hitting OK from the "Open File or Project" [ok]. - - Has a tool bar at the top. There is a double meat hamburger menu icon - at the left. Hitting this will replace the top bar with a vertical - menu for drop down menus. - - Careful, after the hamburger icon is pressed, the first drop down - menu instantly appears. Slide over to get the other drop downs. - Don't click, slide! - - Under tool bar: - Far left is an icon bar. Then a file browser. And then a big box - describing hot keys. - --------------------------------------------------------------------------------- -Configuration - -If you cloned the Ariadne project, the modules will already be configured, and -also probably some of the run configuration will already be configured. - - ------------- - Setup Project - Hamburger icon > File dop-down > Project Structure > Project - - select project SDK from disk: - ~/Ariadne/tool_shared/third_party/jdk-11 - - ------------- - Setup Modules - - Hamburger icon > File dop-down > Project Structure > Modules - - Shows: "Project Structure" dialog - - Hit the '+' option that shows at the top of the second panel. - - New Module. - - Dialog pop-up - - Name: developer - - Location: (browse to the developer directory) - - alternatively enter the full path, ~/Ariadne, e.g. - - $PROJECT_DIR$ instead of, ~/Ariadne, worked when - entering the first module, but not the second. - - Dependencies: - Select the "Project SDK" from the drop down. - - Careful, the module won't be made until hitting [Create] at the bottom. - - As far as I can tell you can't get this panel again, rather delete and add - a new module if you need to change the entries. - - Shows: "Project Structure" dialog, again, now the third panel with information about the - developer module. - Third panel shows three choices: [Source] [Paths] [Dependencies] - - [Sources] is already selected. - - With Sources there are two panels. - - In second panel, on right side, the module root should show at the top. - Under if it lists any sources, use the button at the far right of the - listing to x it out. - - The first panel now shows a file browser for the module. - - Select the `javac` directory with a single click. Then, and only - after, look immediately the directory lists and click on [Sources] - - "Source Folders" will now appear in the second panel. The - javac folder will be listed. - - hit: [apply] at the bottom (or the form will reset to defaults next time) - - - Slide over to [Paths] - Copmiler Output - select [Use Module Compile Output Path] - Output Path: $PROJECT_DIR$/developer/scratchpad - Test Path: $PROJECT_DIR$/developer/test - - leave the exclude output checkbox, that means to exclude from repo - and from indexing for search - - hit: [apply] at the bottom - - ------------- - To add an external tool, for example tester/tool/make: - - This is how we integrate the local tools. - - Note, even if a shell script runs then runs a java program, that jave program - was compiled with debug flags, and run in debug mode, it can't be debugged. It - won't stop at break points, etc. 
For that an 'application' must be added see - the next section. - - Hamburger> Run > edit configurations - Shows Run/Debug configurations dialog - Upper left hit '+' - Shows drop down - chose [Shell Script] second from bottom - Shows dialog, for example: - Name: tester make - Script Path: ~/Ariadne/tester/tool/make (better to chose with the browser tool) - Script Options: tester make - Working Directory: ~/Ariadne (location of the env source scripts that env_run uses) - Environment variabls: (none, env_run will source env_tester) - Interpreter: /bin/bash (left to default) - - ------------- - To add a program for debugging. - - Humburger > Run > edit configurations - Shows Run/Debug configurations dialog - Upper left hit '+' - Shows drop down - chose [Application] first choice - Shows dialog, for example: - Name: Test_Graph_0 - - next line are two boxes, they are not labeled, the defaults show: - [ module not specified ] [ -cp no module ] - I selected:: - [ java 11 SDk of 'tester' module] [ -cp tester ] - This can be confusing, as the modules are 'tester' and 'developer', but - here it asks for an SDK! Then the next box says it wants a class path, - but it wants a module name! - - next line one box, not labeled - [ main class [] ] - Note icon at right, it will give a list of class names, here in the tester module, - that have main calls, select one. - - next line, again not labeled - [ Program Arguments ] - Test_Graph_0 has no arguments so I left it blank. - - Working Directory: ~/Ariadne - - Environment Variables: - Left blank because the executable itself does not make use of any. I do - know at this point if variables set in the environment IDEA ran in are - inherited. - - 'Modify Options' with a drop down menu. (At the top right of the configuration dialog) - Scan down for the `Java` section. - Check: 'Do not build before run' - (To build this example, go to the Run menu and run `tester make'. Or run make directly - from a console prompt. Be sure to source env_tester first.) - - Next go to main window file browser, click on the file you want to debug, click on the line - to set a break point. Right click to get a menu, and - diff --git "a/tool_shared/document\360\237\226\211/install_emacs.txt" "b/tool_shared/document\360\237\226\211/install_emacs.txt" deleted file mode 100644 index e5e0527..0000000 --- "a/tool_shared/document\360\237\226\211/install_emacs.txt" +++ /dev/null @@ -1,32 +0,0 @@ - -System requirements: - -dnf install libX11-devel libXpm-devel libjpeg-devel libpng-devel libtiff-devel -dnf install gtk3-devel giflib-devel gnutls-devel -dnf install ncurses-devel texinfo -dnf install libacl-devel libattr-devel libgccjit libgccjit-devel - -I gather this warning is unavaoidable? -"configure: WARNING: Your version of Gtk+ will have problems with" - -# install and build script: - -cd "$REPO_HOME"/tool_shared/third_party -mkdir -p emacs/{src,build,bin} - -# We sought stability, and now this. What can I say? It has 'visual-wrap-prefix-mode'. 
-pushd upstream -curl -L -O https://alpha.gnu.org/gnu/emacs/pretest/emacs-30.0.92.tar.xz -popd - -tar -xf upstream/emacs-30.0.92.tar.xz -C emacs/src --strip-components=1 - -pushd emacs/src -./configure --prefix="$REPO_HOME"/tool_shared/third_party/emacs -make -j$(nproc) -make install -make clean -popd - -rm -r emacs/{src,build} - diff --git "a/tool\360\237\226\211/.githolder" "b/tool\360\237\226\211/.githolder" deleted file mode 100644 index e69de29..0000000 diff --git "a/tool\360\237\226\211/env" "b/tool\360\237\226\211/env" deleted file mode 100644 index d3541e6..0000000 --- "a/tool\360\237\226\211/env" +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash -script_afp=$(realpath "${BASH_SOURCE[0]}") - -# input guards - - env_must_be="tool_shared/bespoke🖉/env" - error_bad_env=false - error_not_sourced=false - if [ "$ENV" != "$env_must_be" ]; then - echo "$(script_fp):: error: must be run in the $env_must_be environment" - error_bad_env=true - fi - if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then - echo "$script_afp:: This script must be sourced, not executed." - error_not_sourced=true - fi - if $error_not_sourced; then exit 1; fi - if $error_bad_env; then return 1; fi - -export PATH=\ -"$REPO_HOME"/tool_shared/bespoke/\ -:"$PATH" - -# expose sneaky hidden files -alias ls="ls -a" - -# some feedback to show all went well - - export PROMPT_DECOR="$PROJECT"_administrator - export ENV=$(script_fp) - echo ENV "$ENV" - - - -