Visual Regression Testing with BackstopJS

https://github.com/garris/BackstopJS

This is some documentation of my investigation into using BackstopJS with Mahara. In the end, the Mahara UI has so many different behaviour-driven states that it would be incredibly time consuming to set up the conditions for these states in the backstop.json config. My conclusion was that, if we are to implement automated visual regression testing for Mahara, it should be integrated with our current BDD test suite, i.e. Behat.

Set up: create a small Node.js script, urls.js, that builds the scenario list from the URLs you want to capture:

const fs = require('fs-extra'); // writes json files

const urls = [url1, url2, etc ...]; // The list of urls (site-relative paths)

let scenarios = [];

urls.forEach(string => {
   let jsonstring = {
       "label": string,
       "url": "<your domain>" + string,
   };
   scenarios.push(jsonstring);
});

fs.writeJson('scenarios.json', scenarios, (err) => {
   if (err) throw err;
});
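
For illustration, using the two URLs that appear in the backstop.json example below (with http://mahara2 as the local test site used on this page), the urls array and the generated scenarios.json would look roughly like this:

const urls = [
   '/module/multirecipientnotification/inbox.php',
   '/user/view.php?id=1',
];

// scenarios.json then contains:
// [
//   {"label": "/module/multirecipientnotification/inbox.php", "url": "http://mahara2/module/multirecipientnotification/inbox.php"},
//   {"label": "/user/view.php?id=1", "url": "http://mahara2/user/view.php?id=1"}
// ]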

  • Run this file on the CLI with node urls.js
  • Copy and paste the JSON array from the generated scenarios.json file into your backstop.json file as the value of the "scenarios" key
  • Make sure your backstop.json file looks like this:

{
  "viewports": [
    {
      "name": "Below-480",
      "width": 478,
      "height": 730
    },
    {
      "name": "Below-768",
      "width": 766,
      "height": 730
    },
    {
      "name": "Below-992",
      "width": 990,
      "height": 730
    },
    {
      "name": "Below-1200",
      "width": 1198,
      "height": 730
    },
    {
      "name": "Below-1440",
      "width": 1438,
      "height": 730
    }
  ],
  "onReadyScript": "puppet/onReady.js",
  "scenarios": [
    {"label": "/module/multirecipientnotification/inbox.php", "url": "http://mahara2/module/multirecipientnotification/inbox.php"},
    {"label": "/user/view.php?id=1", "url": etc...}
  ],
  "paths": {
    "bitmaps_reference": "backstop_data/bitmaps_reference",
    "bitmaps_test": "backstop_data/bitmaps_test",
    "casper_scripts": "backstop_data/casper_scripts",
    "html_report": "backstop_data/html_report",
    "ci_report": "backstop_data/ci_report"
  },
  "casperFlags": [],
  "engine": "puppet",
  "engineOptions": {
    "headless": true,
    "browser": "chromium"
  },
  "asyncCaptureLimit": 1,
  "debug": true,
  "report": ["browser"]
}
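
Pages with content that changes between runs (timestamps, animations, randomised blocks) can produce false positives in the comparison. If that happens, BackstopJS supports per-scenario options such as "delay", "hideSelectors" and "misMatchThreshold". A minimal sketch of a tuned scenario entry; the .dynamic-timestamp selector is only a hypothetical example, not a real Mahara class:

{
  "label": "/module/multirecipientnotification/inbox.php",
  "url": "http://mahara2/module/multirecipientnotification/inbox.php",
  "delay": 1000,
  "hideSelectors": [".dynamic-timestamp"],
  "misMatchThreshold": 0.1
}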

  • And make sure your puppet/onReady.js file looks like this:

module.exports = async (page, scenario, vp) => {

 console.log('SCENARIO > ' + scenario.label);
 // await require('./clickAndHoverHelper')(page, scenario);
 
 // add more ready handlers here...
 // check for login element
 let loggedin;
 await Promise.all([
   page.goto(scenario.url),
   page.waitForSelector('input[name="login_username"]', {timeout: 3000}),
 ]).then(() => {
   loggedin = false;
 }).catch(() => {
   loggedin = true;
 })
 
 if (!loggedin) {
   await page.type('input[name="login_username"]', '<your username>');
   await page.type('input[name="login_password"]', '<your password>');
   await Promise.all([
     page.waitForNavigation({waitUntil: 'networkidle2'}),
     page.click('#login_submit'),
   ]);
   await page.waitForTimeout(1000);
 } else {
   return;
 }

};

  • Start by running backstop reference - this will create reference screenshots for you to compare against when you run your test.
  • Then make your changes or check out your test patch.
  • Then run backstop test - this will take screenshots and compare them with your reference screenshots. (A condensed command sequence is sketched after this list.)
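
Assuming BackstopJS has been installed with npm and you are in the directory containing backstop.json, the cycle looks roughly like this:

npm install -g backstopjs    # one-off install (assumes a global npm install)
backstop reference           # capture the baseline screenshots
# ...make your changes or check out the patch under test...
backstop test                # capture new screenshots and compare against the baseline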

For some reason the report tries to open in Matrix... But you can view the report results by opening the generated backstop_data/html_report/index.html in a browser.

If you're happy with one of the test images, then you can run backstop approve and pass a --filter=<image_filename_regex> argument to promote that specific test capture matching your scenario filename. The filenames (which by default include scenario and viewport names) are displayed in the visual report.
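
For example, to promote only the inbox captures from the scenarios above, a filter regex that matches part of the filename shown in the report should do it:

backstop approve --filter="inbox"    # regex matching the capture filename(s) shown in the report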