
I applied a transformation during the training phase in PyTorch, then converted my model to run in TensorFlow.js. The model runs, but I get wrong predictions because I didn't apply the same transformation.

test_transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize(size=(224, 224)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

I am able to resize the image but not to normalize it. How can I do that?

Update:-

<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs/dist/tf.min.js" type="text/javascript"></script>
    <script>
        {% load static %}
       async function load_model(){
            const model = await tf.loadGraphModel("{% static 'disease_detection/tfjs_model_2/model.json' %}");
            console.log(model);
            return model;
        }

        function loadImage(src){
            return new Promise((resolve, reject) => {
                const img = new Image();
                img.src = src;
                img.onload = () => resolve(tf.browser.fromPixels(img, 3));
                img.onerror = (err) => reject(err);
            });
        }

        

        function resizeImage(image) {

            return tf.image.resizeBilinear(image, [224, 224]).sub([0.485, 0.456, 0.406]).div([0.229, 0.224, 0.225]);

        }

        function batchImage(image) {
            
            const batchedImage = image.expandDims(0);  
            //const batchedImage = image; 
            return batchedImage.toFloat();
        }

        function loadAndProcessImage(image) {
            //const croppedImage = cropImage(image);
            const resizedImage = resizeImage(image);
            const batchedImage = batchImage(resizedImage);
            return batchedImage;
        }


        let model =  load_model();
       model.then(function (model_param){
            loadImage('{% static 'disease_detection/COVID-19 (97).png' %}').then(img=>{
             let imge = loadAndProcessImage(img);
             const t4d = tf.tensor4d(Array.from(imge.dataSync()),[1,3,224,224])
                console.log(t4d.dataSync());
             let prediction = model_param.predict(t4d);
             let v = prediction.argMax().dataSync()[0]
             console.log(v)
        })
       })

I tried this code but it is not normalizing the image properly.


2 Answers
  • torchvision.transforms.ToTensor() converts a PIL Image or numpy array in the range 0 to 255 to a float tensor of shape (channels x height x width) in the range 0.0 to 1.0. To get values into the range 0.0 to 1.0 it divides each element of the tensor by 255. To do the same in TensorFlow.js I did the following:
img = tf.image.resizeBilinear(img, [224, 224]).div(tf.scalar(255))
img = tf.cast(img, dtype = 'float32');
  • torchvision.transforms.Normalize() normalizes a tensor image with a mean and standard deviation. Given mean: (mean[1],...,mean[n]) and std: (std[1],...,std[n]) for n channels, this transform normalizes each channel of the input tensor, i.e., output[channel] = (input[channel] - mean[channel]) / std[channel]. I didn't find such a function in TensorFlow.js, so I normalized each channel separately and combined them again.

The complete function is as follows:

function imgTransform(img){
    img = tf.image.resizeBilinear(img, [224, 224]).div(tf.scalar(255));
    img = tf.cast(img, 'float32');

    /* mean of natural images */
    let meanRgb = { red: 0.485, green: 0.456, blue: 0.406 };

    /* standard deviation of natural images */
    let stdRgb = { red: 0.229, green: 0.224, blue: 0.225 };

    let indices = [
        tf.tensor1d([0], "int32"),
        tf.tensor1d([1], "int32"),
        tf.tensor1d([2], "int32")
    ];

    /* separating the tensor channel-wise and applying normalization to each channel separately */
    let centeredRgb = {
        red: tf.gather(img, indices[0], 2)
                .sub(tf.scalar(meanRgb.red))
                .div(tf.scalar(stdRgb.red))
                .reshape([224, 224]),

        green: tf.gather(img, indices[1], 2)
                .sub(tf.scalar(meanRgb.green))
                .div(tf.scalar(stdRgb.green))
                .reshape([224, 224]),

        blue: tf.gather(img, indices[2], 2)
                .sub(tf.scalar(meanRgb.blue))
                .div(tf.scalar(stdRgb.blue))
                .reshape([224, 224])
    };

    /* combining the separately normalized channels into a [1, 3, 224, 224] tensor */
    let processedImg = tf.stack([
        centeredRgb.red, centeredRgb.green, centeredRgb.blue
    ]).expandDims();
    return processedImg;
}
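For completeness, here is roughly how the function can be plugged into the loading code from the question (a minimal sketch: load_model(), loadImage() and the static image path are the question's own, and the argMax axis assumes the model outputs a [1, numClasses] tensor):

load_model().then(model => {
    loadImage("{% static 'disease_detection/COVID-19 (97).png' %}").then(img => {
        const input = imgTransform(img);                  // [1, 3, 224, 224]
        const prediction = model.predict(input);
        console.log(prediction.argMax(1).dataSync()[0]);  // index of the predicted class
    });
});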

Even though I am not very familiar with the PyTorch documentation, a quick look at it shows that the first parameter of Normalize is the mean of the dataset and the second parameter is the standard deviation.

To normalize using these two parameters with tensorflow.js, the following can be used

tensor.sub([0.485, 0.456, 0.406]).div([0.229, 0.224, 0.225])

But the tensor values should first be brought into the range 0 to 1 by dividing by 255 after the resize operation. The whole compose operation will look like the following:

tf.image.resizeBilinear(image, [224, 224]).div(255)
  .sub([0.485, 0.456, 0.406])
  .div([0.229, 0.224, 0.225]);
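Note that this produces a [224, 224, 3] tensor. If the converted model expects the channels-first [1, 3, 224, 224] input shown in the question, a transpose and a batch dimension would still be needed, for example (a sketch assuming that input layout):

const normalized = tf.image.resizeBilinear(image, [224, 224]).div(255)
  .sub([0.485, 0.456, 0.406])
  .div([0.229, 0.224, 0.225]);
// reorder HWC -> CHW and add a batch dimension (assumes the model wants [1, 3, 224, 224])
const input = normalized.transpose([2, 0, 1]).expandDims(0);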
