The simplest way to gather the images from a website is to find them by tagName.
First we create a list of WebElements containing the elements whose tag name is "img".
Then we check the elements to be sure that they are displayed.
If they are displayed then we get their names and sources.
To download them we use the “org.apache.commons.io.FileUtils.copyURLToFile” function.
It is very simple: we just need to pass it the URL and the destination file path.
In the end we write out the number of displayed elements.
Here is the full code:
import java.io.File; import java.net.URL; import java.util.List; import org.openqa.selenium.By; import org.openqa.selenium.WebDriver; import org.openqa.selenium.WebElement; import org.openqa.selenium.firefox.FirefoxDriver; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; public class DownloadingImages{ WebDriver driver; @BeforeClass public void setUp(){ driver = new FirefoxDriver(); driver.get("http://en.wikipedia.org/wiki/Flower"); } @AfterClass public void tearDown(){ driver.quit(); } @Test public void testImages()throws Exception{ File files = null; Integer counter=0; Thread.sleep(5000); List<WebElement> listImages=driver.findElements(By.tagName("img")); System.out.println("No. of Images: "+listImages.size()); for(WebElement image:listImages) { if(image.isDisplayed()) { counter++; String[] names = image.getAttribute("src").split("/"); String name = names[names.length-1]; System.out.println(name); URL myURL = new URL(image.getAttribute("src")); files= new File("C:\\downloadedPictures\\"+ System.currentTimeMillis()+ "." +name); org.apache.commons.io.FileUtils.copyURLToFile(myURL, files); } } System.out.println("No. of total displable images: "+counter); } }