From: Thomas Walker Lynch Date: Fri, 1 Nov 2024 11:02:04 +0000 (+0000) Subject: a checkpoint X-Git-Url: https://git.reasoningtechnology.com/style/static/gitweb.css?a=commitdiff_plain;h=c63be1e1366acefc2d799b8857674f503bea6996;p=Mosaic a checkpoint --- diff --git a/developer/javac/TestBench.java b/developer/javac/TestBench.java index ea0eb3c..01dbba7 100644 --- a/developer/javac/TestBench.java +++ b/developer/javac/TestBench.java @@ -85,7 +85,7 @@ public class TestBench { /* -------------------------------------------------------------------------------- Run all tests in the test suite */ - public static void run(Object test_suite){ + public static int run(Object test_suite){ int failed_tests = 0; int passed_tests = 0; Method[] methods = test_suite.getClass().getDeclaredMethods(); @@ -99,5 +99,8 @@ public class TestBench { System.out.println("Total tests run: " + (passed_tests + failed_tests)); System.out.println("Total tests passed: " + passed_tests); System.out.println("Total tests failed: " + failed_tests); + + return (failed_tests > 0) ? 1 : 0; } + } diff --git a/document/Tests_Writing_Output_Stream_Policy.html b/document/Tests_Writing_Output_Stream_Policy.html new file mode 100644 index 0000000..29bd872 --- /dev/null +++ b/document/Tests_Writing_Output_Stream_Policy.html @@ -0,0 +1,112 @@ + + + + + + + Output Stream Policy - Mosaic Project + + + +
+

Output Stream Policy for Tests

+ +

Overview of the IO Object

+ +

Each test function is given an IO object, which provides + methods for inspecting stdout and stderr output + streams, programmatically adding data to the stdin input stream, + and clearing output streams as needed. Although the IO object is + optional, it is available for cases where I/O validation or cleanup is + essential to the test.

+ +
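As a minimal sketch of how a test might use the IO object to supply stdin data; the push_input method shown here is the one exercised by Test_MockClass.java elsewhere in this commit, while the class and method names are illustrative only:

  import java.util.Scanner;
  import com.ReasoningTechnology.Mosaic.IO;

  public class Example_IO_Overview{

    // Queue a line on stdin, then read it back the way a fut would.
    public Boolean test_stdin_example(IO io){
      io.push_input("input for the fut");
      Scanner scanner = new Scanner(System.in);
      String result = scanner.nextLine();
      return result.equals("input for the fut");
    }
  }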

Purpose

+ +

Each test function is responsible for managing any output generated + on stdout or stderr by the function under test + (fut). TestBench will automatically clear the streams before + each test begins and will check them after the test completes, treating any + remaining output as unintended and marking the test as a failure. This policy + ensures that tests intentionally handle output by either validating, + clearing, or ignoring it, thereby maintaining a clean and predictable testing + environment.

+ +
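For example, a test that expects the fut to write to stdout can validate that output and then clear it, so that TestBench does not flag it as unintended. A sketch following the pattern of Test_MockClass.java in this commit (the class name is illustrative):

  import com.ReasoningTechnology.Mosaic.IO;

  public class Example_OutputPolicy{

    public Boolean test_fut_stdout(IO io){
      System.out.println("fut stdout");                  // stand-in for output produced by the fut
      Boolean seen = io.has_out_content();               // the output was noticed
      Boolean matches = io.get_out_content().equals("fut stdout\n"); // and validated
      io.clear_buffers();                                // cleared, otherwise TestBench fails the test
      return seen && matches;
    }
  }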

Policy Guidelines

+ + +

Example Scenarios

+ + +

Summary

+

Each test should manage its output streams with an intentional policy:

+ +

This approach ensures that tests remain clean and focused on their primary objectives without unintended side effects from unhandled output.

+
+ + diff --git a/document/White_Box_Testing.html b/document/White_Box_Testing.html new file mode 100644 index 0000000..86caec7 --- /dev/null +++ b/document/White_Box_Testing.html @@ -0,0 +1,259 @@ + + + + + + + White Box Testing - Mosaic Project + + + +
+
+

White Box Testing

+

© 2024 Thomas Walker Lynch - All Rights Reserved.

+
+ +

Introduction

+ +
+

Testing centers around three key components: the test + bench, the test functions (or tests), and + the functions under test. In most cases, the + developer provides the functions under test. When this tool is used, Mosaic + supplies the test bench. This leaves the tester with the role of creating and + running the tests. Often, of course, the tester role and the developer role are + performed by the same person, though these roles are distinct.

+ +

The term function refers to any program or circuit whose outputs are determined solely by its inputs, with no internal state being kept and no side effects. All inputs and outputs are explicitly defined. By definition, a function returns a single result, but this is not a strong constraint, because that single result may itself be a collection, such as a vector or a set.

+ +

We need this precise definition for a function to make meaningful + statements in this document, but the Mosaic TestBench can be used with + tests designed to evaluate any type of subroutine. A later chapter will + cover testing stateful subroutines, provided that I get around to writing it.

+ +

There is also a nuanced distinction between function + in singular and plural forms, because a collection of functions can be viewed as + a single larger function with perhaps more inputs and outputs. Hence, when a test + is said to work on a function, we cannot conclude that it is a single function + defined in the code.

+ +

A test must have access to the function under test so that it can supply + inputs and harvest outputs from it. A test must also have a + failure detection function that, when given + copies of the inputs and outputs, will return a result indicating if a + test failed or not. Ideally, the failure detection function is accurate, + or even perfect, as this reduces missed failures and minimizes the need + to verify cases that it has flagged as failures.

+ +
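As an illustration only, not a Mosaic interface, a failure detection function for a hypothetical two-input adder might be the following fragment:

  // Hypothetical failure detection function: given copies of the inputs and
  // the actual output, report whether the test failed.
  public static boolean add_failed(int a, int b, int actual){
    int expected = a + b;       // a trivially accurate reference for this example
    return actual != expected;  // true means a failure was detected
  }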

The tester’s goal is to identify failures, + observable differences between actual outputs and expected outputs. Once a + failure is identified, a developer can investigate the issue, locate + the fault, and implement corrections as + necessary. While Mosaic aids in failure detection, it does not directly + assist with debugging.

+ +
+ +

Unstructured Testing

+ +

Unstructured testing is at the base of all testing strategies. The following are some examples of approaches to unstructured testing. The Mosaic TestBench is agnostic to the approach used; rather, this section is about writing the test code that the TestBench will call.

+ +

Reference Value Based Testing

+ +

In reference value-based testing, an ordering + is assigned to the inputs for + the function under test, as well as to + its outputs. With this ordering, the function + under test can be said to receive an input + vector and to return an actual output vector.

+ +

In this testing approach, a Reference Model is also used. + When given an input vector, the Reference Model will produce + a corresponding reference output vector that follows the + same component ordering as the actual output vector from the + function under test.

+ +

The failure detection function then compares each + actual output vector with its respective reference output vector. If they do + not match, the test is deemed to have failed.

+ +

The Reference Model is sometimes referred to as the golden + model, and said to produce golden values. However, this + terminology is often an exaggeration, as testing frequently reveals inaccuracies + in reference values.

+ +

Thus, in reference value-based testing, the failure detection function + relies on a comparison between the actual and reference output vectors. Its accuracy + depends directly on the accuracy of the Reference Model.

+ +
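A sketch of reference value-based testing written as a Mosaic-style test method; Fut.square_each is a hypothetical function under test, and the hand-written reference model plays the role of the Reference Model:

  import java.util.Arrays;
  import com.ReasoningTechnology.Mosaic.IO;

  public class Example_ReferenceValueTest{

    // Reference Model: produces the reference output vector for a given input vector.
    static int[] reference_model(int[] input_vector){
      int[] reference = new int[input_vector.length];
      for(int i = 0; i < input_vector.length; i++)
        reference[i] = input_vector[i] * input_vector[i];
      return reference;
    }

    // Failure detection: compare the actual output vector with the reference output vector.
    public Boolean test_square_each(IO io){
      int[] input_vector = {0, 1, 2, 3};
      int[] actual = Fut.square_each(input_vector);   // hypothetical function under test
      int[] reference = reference_model(input_vector);
      return Arrays.equals(actual, reference);        // a mismatch means the test failed
    }
  }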

Property Check Testing

+ +

Property check testing is an alternative to + reference value-based testing. Here, rather than comparing the actual + outputs to reference outputs, the actual output is validated against + known properties or expected characteristics.

+ +

For example, given an integer as input, a function that squares this + input should yield an even result for even inputs and an odd result for odd + inputs. If the output satisfies the expected property, the test passes; + otherwise, it fails. This approach allows testing of general behaviors + without specific reference values.

+ +
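A sketch of a property check for the squaring example above; Fut.square and the class name are hypothetical:

  import com.ReasoningTechnology.Mosaic.IO;

  public class Example_PropertyCheck{

    public Boolean test_square_preserves_parity(IO io){
      int[] samples = {-3, -2, 0, 1, 4, 7};
      for(int x : samples){
        int y = Fut.square(x);                             // hypothetical function under test
        boolean parity_preserved = (x % 2 == 0) == (y % 2 == 0);
        if(!parity_preserved) return false;                // property violated, so the test fails
      }
      return true;
    }
  }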

Spot Checking

+ +

With spot checking, the function under test is checked against one or + two input vectors.

+ +

Moving from zero to one, i.e. trying a program for the first time, can have a particularly high threshold of difficulty. A tremendous amount is learned during development if even one test passes for a function.

+ +

Sometimes there are notorious edge cases. Zeros and indices one past the end of an array come to mind. Checking a middle value along with the edge cases is often an effective test.

+ +

It takes two points to determine a line. In Fourier analysis, it takes two samples per period of the highest frequency component to determine an entire waveform. There is only so much a piece of code can do differently if it works at the edge cases and in between. It is because of this effect that ad hoc testing has produced so much working code.

+ +

Spot checking is particularly useful during development. It gives the highest testing leverage for a low investment. A high investment is not appropriate for code that is still in development, not yet stable, and open to being refactored.

+ +
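A sketch of a spot check covering one edge case and one middle value; Fut.max_of and the class name are hypothetical:

  import com.ReasoningTechnology.Mosaic.IO;

  public class Example_SpotCheck{

    public Boolean test_max_of_spot_check(IO io){
      boolean edge_ok   = Fut.max_of(new int[]{5}) == 5;        // edge case: single-element array
      boolean middle_ok = Fut.max_of(new int[]{3, 9, 2}) == 9;  // an ordinary middle value
      return edge_ok && middle_ok;
    }
  }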
+ + + + diff --git a/release/Mosaic.jar b/release/Mosaic.jar index 122284d..dd5ec49 100644 Binary files a/release/Mosaic.jar and b/release/Mosaic.jar differ diff --git a/tester/document/about_the_tests.html b/tester/document/about_the_tests.html index f7d5f0d..18d6cc2 100644 --- a/tester/document/about_the_tests.html +++ b/tester/document/about_the_tests.html @@ -36,18 +36,20 @@

These tests are primarily ad hoc, as we avoid using the TestBench to test itself. Despite being ad hoc, the tests follow a core philosophy: the goal - is to identify which functions fail, rather than diagnose why they fail. To - achieve this, tests do not print messages but instead - return true if they pass.

+ is to identify which functions fail, rather than diagnose why they fail. In the argot of the field, we are looking for function failures and are not identifying faults. Hence, tests do not print messages but instead signal whether they fail or not.

Accordingly, only pass/fail counts and the names of failing functions are - recorded. For more detailed investigation, the developer can run a failed - test using a debugging tool such as jdb.

+ recorded. For more detailed investigation, that is, for locating the fault, the developer can run a failed test using a debugging tool such as jdb.

1. Running the Tests

To run all tests and gather results, follow these steps:

    - Ensure the environment is clean by running clean_build_directories.
    + Make sure no old files are hanging about by running clean_build_directories.
      Run make to compile the project and prepare all test class shell wrappers.
      Run run_tests to run the tests. Each test class will output its results, identifying tests that failed.
  6. diff --git a/tester/javac/Test_MockClass.java b/tester/javac/Test_MockClass.java new file mode 100644 index 0000000..64bc962 --- /dev/null +++ b/tester/javac/Test_MockClass.java @@ -0,0 +1,98 @@ +/* -------------------------------------------------------------------------------- + Integration tests directly simulate the use cases for TestBench. + Each test method validates a specific feature of TestBench ,including pass, + fail ,error handling ,and I/O interactions. +*/ + +import java.util.Scanner; +import com.ReasoningTechnology.Mosaic.IO; +import com.ReasoningTechnology.Mosaic.TestBench; + +public class Test_MockClass{ + + public class TestSuite{ + + public TestSuite() { + // no special initialization of data for this test + } + + public Boolean test_failure_0(IO io){ + return false; + } + + // returns a non-Boolean + public Object test_failure_1(IO io){ + return 1; + } + + // has an uncaught error + public Boolean test_failure_2(IO io) throws Exception { + throw new Exception("Intentional exception for testing error handling"); + } + + // extraneous characters on stdout + public Boolean test_failure_3(IO io) throws Exception { + System.out.println("Intentional extraneous chars to stdout for testing"); + return true; + } + + // extraneous characters on stderr + public Boolean test_failure_4(IO io) throws Exception { + System.err.println("Intentional extraneous chars to stderr for testing."); + return true; + } + + public Boolean test_success_0(IO io){ + return true; + } + + // pushing input for testing + + public Boolean test_success_1(IO io){ + io.push_input("input for the fut"); + + Scanner scanner = new Scanner(System.in); + String result = scanner.nextLine(); + scanner.close(); + + Boolean flag = result.equals("input for the fut"); + return flag; + } + + // checking fut stdout + public Boolean test_success_2(IO io){ + System.out.println("fut stdout"); // suppose the fut does this: + String peek_at_futs_output = io.get_out_content(); + Boolean flag0 = io.has_out_content(); + Boolean flag1 = peek_at_futs_output.equals("fut stdout\n"); + io.clear_buffers(); // otherwise extraneous chars will cause an fail + return flag0 && flag1; + } + + // checking fut stderr + public Boolean test_success_3(IO io){ + System.err.print("fut stderr"); // suppose the fut does this: + String peek_at_futs_output = io.get_err_content(); + Boolean flag0 = io.has_err_content(); + Boolean flag1 = peek_at_futs_output.equals("fut stderr"); + io.clear_buffers(); // otherwise extraneous chars will cause an fail + return flag0 && flag1; + } + + } + + public static void main(String[] args) { + Test_MockClass outer = new Test_MockClass(); + TestSuite suite = outer.new TestSuite(); // Non-static instantiation + + /* for debug + IO io = new IO(); + io.redirect(); + suite.test_success_2(io); + */ + + int result = TestBench.run(suite); // Pass the suite instance to TestBench + System.exit(result); + } + +} diff --git a/tester/javac/Test_TestBench.java b/tester/javac/Test_TestBench.java index 848fae3..a4ae469 100644 --- a/tester/javac/Test_TestBench.java +++ b/tester/javac/Test_TestBench.java @@ -23,6 +23,7 @@ public class Test_TestBench { // Tests if a method with an invalid return type is identified as malformed by TestBench public static Boolean test_method_is_wellformed_1(IO io) { + System.out.println("Expected output: Structural problem message for dummy_invalid_return_method."); try { Method invalidReturnMethod = Test_TestBench.class.getMethod("dummy_invalid_return_method", IO.class); return 
Boolean.FALSE.equals(TestBench.method_is_wellformed(invalidReturnMethod)); @@ -64,9 +65,9 @@ public class Test_TestBench { if (test_run_test_0(io)) passed_tests++; else { System.out.println("test_run_test_0"); failed_tests++; } // Summary for all the tests - System.out.println("Total tests run: " + (passed_tests + failed_tests)); - System.out.println("Total tests passed: " + passed_tests); - System.out.println("Total tests failed: " + failed_tests); + System.out.println("Test_TestBench Total tests run: " + (passed_tests + failed_tests)); + System.out.println("Test_TestBench Total tests passed: " + passed_tests); + System.out.println("Test_TestBench Total tests failed: " + failed_tests); return (failed_tests > 0) ? 1 : 0; } diff --git a/tester/jvm/Test_Mosaic.jar b/tester/jvm/Test_Mosaic.jar index ff370e3..73b5565 100644 Binary files a/tester/jvm/Test_Mosaic.jar and b/tester/jvm/Test_Mosaic.jar differ diff --git a/tester/shell/Test_MockClass b/tester/shell/Test_MockClass new file mode 100755 index 0000000..2e4f2a7 --- /dev/null +++ b/tester/shell/Test_MockClass @@ -0,0 +1,2 @@ +#!/bin/env bash +java Test_MockClass diff --git a/tester/tool/shell_wrapper_list b/tester/tool/shell_wrapper_list index 3b46b8d..99bf5de 100755 --- a/tester/tool/shell_wrapper_list +++ b/tester/tool/shell_wrapper_list @@ -9,5 +9,5 @@ if [ "$ENV" != "$env_must_be" ]; then fi # space separated list of shell interface wrappers -echo Test0 Test_Util Test_IO Test_TestBench +echo Test0 Test_Util Test_IO Test_TestBench Test_MockClass