I am working on a Chrome extension. The task is to classify images on a webpage. I have set up my model, but I am constantly getting an error when converting an image to a tensor before making a prediction. I am getting all the images using document.getElementsByTagName('img'), which returns a collection object. Then I am using tensor = tf.fromPixels(imgs[0]) to get the tensor.
It says:
Error in event handler for runtime.onMessage: Error: pixels passed to tf.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement or ImageData, but was Object at pre_process () at
Please help.
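The conversion I am attempting is essentially this (a simplified sketch of what is described above):

// grab all <img> elements on the page
var imgs = document.getElementsByTagName('img');
console.log(typeof imgs); // logs "object" (it is an HTMLCollection)

// convert the first image to a tensor for the model
var tensor = tf.fromPixels(imgs[0]);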
EDIT: My code is:
// to load images
function load_imgs(debug) {
    // for images already present on the page
    if (debug == 'static') {
        var imgs = document.getElementsByTagName('img');
        console.log('imgs type after getElementsByTagName:', typeof imgs);
        var imgSrcs = [];
        for (var i = 0; i < imgs.length; i++) {
            imgSrcs.push(imgs[i]);
        }
        console.log('imgSrcs type:', typeof imgSrcs);
        return imgSrcs;
    }
}
Passing it to the background script as:
// send request to background script; takes an image element reference as input
function back_request(elem_ref) {
    console.log('from back_request ' + typeof elem_ref);
    console.log("back_request : ", elem_ref);
    chrome.runtime.sendMessage({
        get_predict: "predict",
        img: elem_ref
    }, function (response) {
        console.log(response.prediction_model);
    });
}
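These two are wired together in the content script roughly like this (a simplified sketch, not my exact code):

// collect the images on the page and request a prediction for the first one
var images = load_imgs('static');
if (images && images.length > 0) {
    back_request(images[0]);
}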
This is the background script listener for the message:
// message listener for the prediction
chrome.runtime.onMessage.addListener(
    function (request, sender, sendResponse) {
        console.log(sender.tab ?
            "from a content script: " + sender.tab.url :
            "from the extension");
        if (request.get_predict == "predict") {
            var to_predict = request.img;
            console.log('from receive request ', to_predict);
            // this line generates the error, since pre_process() uses tf.fromPixels() to convert the image to a tensor before making a prediction
            var tensor = pre_process(to_predict);
            var ans = predict_str(model, tensor);
            sendResponse({
                prediction_model: 'Processing'
            });
        }
    }
);

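For reference, pre_process is roughly this (a simplified sketch; the tf.fromPixels() call is the one the error points at):

// convert an image element to a tensor the model can consume
function pre_process(image) {
    // the error is thrown here, because tf.fromPixels() expects an HTMLImageElement, HTMLCanvasElement, HTMLVideoElement, or ImageData
    var tensor = tf.fromPixels(image);
    return tensor;
}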