diff --git a/Python/samples/mnist.py b/Python/samples/mnist.py
index 6c6eb839..fee876ab 100644
--- a/Python/samples/mnist.py
+++ b/Python/samples/mnist.py
@@ -5,112 +5,50 @@
 import pydirectml as dml
 import numpy as np
-from PIL import Image, ImageOps
 import sys
 import os
 
-argument_count = len(sys.argv)
+input_data = [10, 10, 10, 10, 21, 22, 23, 24, 10, 20, 30, 40, 0, 0, 0, 0]
+input_data_array = np.array(input_data, np.float32)
 
-image_file_path = "mnist_image1.png"
-tensor_data_path = "mnist-8_tensor_data"
+weight_data = [0.25, 0.25, 0.25, 0.25, 0.0, 1.0, 0.0, 1.0, 10.0, 20.0, 30.0, 40.0, 50.0,
+               50.0, 50.0, 50.0]
+weight_data_array = np.array(weight_data, np.float32)
 
-if (argument_count >= 2):
-    image_file_path = sys.argv[1]
-
-if (argument_count >= 3):
-    tensor_data_path = sys.argv[2]
-
-if (os.path.exists(image_file_path) == False):
-    print("File not found at: " + str(image_file_path))
-    sys.exit(1)
-
-# Opens image, converts to grayscale, resizes, and crops to the input size.
-image = ImageOps.fit(ImageOps.grayscale(Image.open(image_file_path)), (28, 28), method = 0, bleed = 0, centering = (0.5, 0.5))
-
-# Check top left pixel's color, and inverts image color if pixel is bright.
-x, y = 0, 0
-coordinate = x, y
-if (image.getpixel(coordinate) >= 128):
-    image = ImageOps.invert(image)
-
-# Expand dimensions to 4d tensor format, and rescale values in range of 0.0 and 1.0.
-img_array = np.array(image, np.float32)
-ndarray_image = np.expand_dims(img_array, axis = (0, 1))
-rescaled_image = ndarray_image / ndarray_image.max()
+bias_data = [6000, 7000, 8000, 9000]
+bias_data_array = np.array(bias_data, np.float32)
 
 input_bindings = []
 
-def append_input_tensor(builder: dml.GraphBuilder, input_bindings: list, input_tensor: dml.TensorDesc, file_name: str):
+
+def append_input_tensor(builder: dml.GraphBuilder, input_bindings: list, input_tensor: dml.TensorDesc, tensor_data_array):
     tensor = dml.input_tensor(builder, len(input_bindings), input_tensor)
-    if file_name == "":
-        input_bindings.append(dml.Binding(tensor, np.zeros(tensor.get_output_desc().sizes)))
-    else:
-        input_bindings.append(dml.Binding(tensor, np.load(tensor_data_path + "/" + file_name)))
+    input_bindings.append(dml.Binding(tensor, tensor_data_array))
     return tensor
 
-# Create a GPU device, and build a model graph.
+
 device = dml.Device(True, True)
 builder = dml.GraphBuilder(device)
 
 data_type = dml.TensorDataType.FLOAT32
-input = dml.input_tensor(builder, 0, dml.TensorDesc(data_type, [1, 1, 28, 28]))
+input = dml.input_tensor(builder, 0, dml.TensorDesc(data_type, [1, 4, 2, 2]))
 flags = dml.TensorFlags.OWNED_BY_DML
 
-input_bindings.append(dml.Binding(input, rescaled_image))
-
-# convolution28
-convolution28_weight = append_input_tensor(builder, input_bindings, dml.TensorDesc(data_type, flags, [8, 1, 5, 5]), "Parameter5.npy")
-convolution28_bias = append_input_tensor(builder, input_bindings, dml.TensorDesc(data_type, flags, [1, 8, 1, 1]), "")
-convolution28 = dml.convolution(input, convolution28_weight, convolution28_bias, strides = [1, 1], start_padding = [2, 2], end_padding = [2, 2])
-
-# plus30
-plus30_param6 = append_input_tensor(builder, input_bindings, dml.TensorDesc(data_type, [1, 8, 28, 28], [8, 1, 0, 0]), "Parameter6.npy")
-plus30 = dml.add(convolution28, plus30_param6)
-
-# relu32
-relu32 = dml.activation_relu(plus30)
+input_bindings.append(dml.Binding(input, input_data_array))
 
-# pooling66
-pooling66 = dml.max_pooling(relu32, strides = [2, 2], window_sizes = [2, 2])
+convolution_weight = append_input_tensor(builder, input_bindings, dml.TensorDesc(
+    data_type, flags, [4, 1, 2, 2]), weight_data_array)
+convolution_bias = append_input_tensor(builder, input_bindings, dml.TensorDesc(
+    data_type, flags, [1, 4, 1, 1]), bias_data_array)
+convolution = dml.convolution(input, convolution_weight, convolution_bias, strides=[
+    1, 1], start_padding=[0, 0], end_padding=[0, 0], group_count=4)
 
-# convolution110
-convolution110_weight = append_input_tensor(builder, input_bindings, dml.TensorDesc(data_type, flags, [16, 8, 5, 5]), "Parameter87.npy")
-convolution110_bias = append_input_tensor(builder, input_bindings, dml.TensorDesc(data_type, flags, [1, 16, 1, 1]), "")
-convolution110 = dml.convolution(pooling66.values, convolution110_weight, convolution110_bias, strides = [1, 1], start_padding = [2, 2], end_padding = [2, 2])
-
-# plus112
-plus112_param88 = append_input_tensor(builder, input_bindings, dml.TensorDesc(data_type, [1, 16, 14, 14], [16, 1, 0, 0]), "Parameter88.npy")
-plus112 = dml.add(convolution110, plus112_param88)
-
-# relu114
-relu114 = dml.activation_relu(plus112)
-
-# pooling160
-pooling160 = dml.max_pooling(relu114, strides = [3, 3], window_sizes = [3, 3])
-
-# times212_reshape0
-times212_reshape0 = dml.reinterpret(pooling160.values, dml.TensorDataType.FLOAT32, [1, 1, 1, 256], [256, 256, 256, 1])
-
-# times212_reshape1
-times212_reshape1_param193 = append_input_tensor(builder, input_bindings, dml.TensorDesc(data_type, flags, [16, 4, 4, 10]), "Parameter193.npy")
-identity = dml.activation_identity(times212_reshape1_param193)
-times212_reshape1 = dml.reinterpret(identity, dml.TensorDataType.FLOAT32, [1, 1, 256, 10], [2560, 2560, 10, 1])
-
-# times212
-times212 = dml.gemm(times212_reshape0, times212_reshape1)
-
-# plus214
-plus214_param194 = append_input_tensor(builder, input_bindings, dml.TensorDesc(data_type, flags, [1, 1, 1, 10]), "Parameter194.npy")
-plus214 = dml.add(times212, plus214_param194)
-
-softmax = dml.activation_soft_max(plus214)
-# Compile the expression graph into a compiled operator
-op = builder.build(dml.ExecutionFlags.NONE, [softmax])
+op = builder.build(dml.ExecutionFlags.NONE, [convolution])
 
 # Compute the result
-output_data = device.compute(op, input_bindings, [softmax])
+output_data = device.compute(op, input_bindings, [convolution])
 output_tensor = np.array(output_data[0], np.float32)
-
-number = np.argmax(output_tensor)
-print("\nNumber is: {}".format(number, end=''))
-print("Confidence: {:2.2f}%".format(np.amax(output_tensor) * 100))
\ No newline at end of file
+print(output_tensor)
+# The correct result should be [6010, 7046, 11000, 9000].
+# microsoft.ai.directml.1.5.1 and microsoft.ai.directml.1.6.0 both return this correct result,
+# but microsoft.ai.directml.1.7.0 and microsoft.ai.directml.1.8.0 return the wrong result [6010, 7000, 8000, 9000].
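
For reference, the expected values quoted in the final comment can be checked with a small plain-NumPy sketch. It is not part of the patch; it only makes the arithmetic behind [6010, 7046, 11000, 9000] easy to verify by hand:

```python
import numpy as np

# Same input, weight, and bias values as in the patched sample above.
input_data = np.array([10, 10, 10, 10, 21, 22, 23, 24,
                       10, 20, 30, 40, 0, 0, 0, 0], np.float32).reshape(1, 4, 2, 2)
weight_data = np.array([0.25, 0.25, 0.25, 0.25, 0.0, 1.0, 0.0, 1.0,
                        10.0, 20.0, 30.0, 40.0, 50.0, 50.0, 50.0, 50.0],
                       np.float32).reshape(4, 1, 2, 2)
bias_data = np.array([6000, 7000, 8000, 9000], np.float32)

# With group_count=4 on a 4-channel input, each output channel sees only its own
# input channel, and a 2x2 filter over a 2x2 input with no padding reduces to a
# single dot product per channel plus that channel's bias.
expected = np.array([(input_data[0, c] * weight_data[c, 0]).sum() + bias_data[c]
                     for c in range(4)], np.float32)
print(expected)  # -> [ 6010.  7046. 11000.  9000.]
```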