bash$ wget http://fly.srk.fer.hr/ # Download a single URL
bash$ wget --tries=45 http://fly.srk.fer.hr/jpg/flyweb.jpg # Retry up to 45 times if the connection is flaky
bash$ wget -t 45 -o log http://fly.srk.fer.hr/jpg/flyweb.jpg & # Same as above, but log messages to 'log' and run in the background
bash$ wget -i file # Download every URL listed in 'file'
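# A minimal sketch of how such a URL list can be created (the URLs below are only placeholders), one per line:
bash$ printf '%s\n' 'http://fly.srk.fer.hr/' 'https://www.gnu.org/' > file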
bash$ wget -r https://www.gnu.org/ -o gnulog # Recursively download the GNU site, logging to 'gnulog'
bash$ wget --convert-links -r https://www.gnu.org/ -o gnulog # Same, but convert the links in the downloaded files so they work locally
bash$ wget -p --convert-links http://www.example.com/dir/page.html # Download one page with everything needed to display it (images, CSS), links converted for local viewing
bash$ wget -p --convert-links -nH -nd -Pdownload http://www.example.com/dir/page.html # Same, but save all files directly into download/ without recreating host or remote directories
bash$ wget -S http://www.lycos.com/ # Print the server's response headers alongside the download
bash$ wget --save-headers http://www.lycos.com/ # Save the HTTP headers at the top of the downloaded file
bash$ wget -r -l2 -P/tmp ftp://wuarchive.wustl.edu/ # Retrieve the first two levels of ‘wuarchive.wustl.edu’, saving them to /tmp.
bash$ wget -r -l1 --no-parent -A.gif http://www.example.com/dir/ # Download only the GIFs from a directory, one level deep, without ascending to the parent directory
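# More verbose, but the effect is the same; quoting the pattern keeps the shell from expanding it:
bash$ wget -r -l1 --no-parent -A "*.gif" http://www.example.com/dir/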
bash$ wget -nc -r https://www.gnu.org/ # Re-run an interrupted recursive download without clobbering files already present
bash$ wget -O - http://jagor.srce.hr/ http://www.srce.hr/ # Send the output documents to standard output instead of to files
bash$ wget -O - http://cool.list.com/ | wget --force-html -i - # Download a page of links, then fetch every URL it references
bash$ wget --mirror https://www.gnu.org/ -o /home/me/weeklog # Keep a mirror of the site, logging to /home/me/weeklog (handy as a cron job)
bash$ wget --mirror --convert-links --backup-converted https://www.gnu.org/ -o /home/me/weeklog # Same, with links converted for local viewing and the originals kept as .orig backups
bash$ wget --mirror --convert-links --backup-converted --html-extension -o /home/me/weeklog https://www.gnu.org/ # Additionally save text/html documents with an .html extension
bash$ wget --limit-rate 400k http://archlinux.mirrors.uk2.net/iso/2016.09.03/archlinux-2016.09.03-dual.iso # Cap the download speed at 400 KB/s
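# If the transfer is interrupted, adding -c should let wget resume the partial file under the same rate cap (assuming the server supports range requests):
bash$ wget -c --limit-rate 400k http://archlinux.mirrors.uk2.net/iso/2016.09.03/archlinux-2016.09.03-dual.iso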
bash$ wget -o iso.log http://archlinux.mirrors.uk2.net/iso/2016.09.03/archlinux-2016.09.03-dual.iso # Write the log messages to iso.log instead of the terminal
bash$ wget -mk -w 20 http://www.example.com/ # Mirror the site with converted links, waiting 20 seconds between retrievals
bash$ wget -o wget.log -r -l 10 --spider http://example.com # Find broken links by spidering the site up to 10 levels deep and logging the results to wget.log
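# One way to pull the failures back out of that log (a sketch; the exact message wording can vary between wget versions):
bash$ grep -B2 'broken link' wget.log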
bash$ wget -U 'My-User-Agent' http://www.foo.com # Identify to the server with a custom User-Agent string
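# The long-option form does the same; the browser-like string below is only an example value:
bash$ wget --user-agent='Mozilla/5.0 (X11; Linux x86_64)' http://www.foo.com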
bash$ wget -S http://www.bbc.co.uk # View the response headers (the page is still downloaded)
bash$ wget -S --spider http://www.bbc.co.uk # View the response headers without downloading the page
bash$ wget --directory-prefix=folder/subfolder example.com # Save the download under folder/subfolder/ instead of the current directory
bash$ wget http://example.com/images/{1..20}.jpg # Download a list of sequentially numbered files from a server
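# Zero-padded variant for names like 01.jpg ... 20.jpg; the brace range is expanded by bash (4+) before wget ever runs:
bash$ wget http://example.com/images/{01..20}.jpg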
bash$ wget --page-requisites --span-hosts --convert-links --adjust-extension http://example.com/dir/file # Download a web page with all assets – like stylesheets and inline images
bash$ wget --execute robots=off --recursive --no-parent --continue --no-clobber http://example.com/ # Download an entire website including all the linked pages and files
bash$ wget --directory-prefix=files/pictures --no-directories --recursive --no-clobber --accept jpg,gif,png,jpeg http://example.com/images/ # Download all JPG, GIF and PNG images into files/pictures/ without recreating the site's directory tree
bash$ wget --mirror --domains=abc.com,files.abc.com,docs.abc.com --accept=pdf http://abc.com/ # Mirror only the PDF files found on abc.com and the listed subdomains
bash$ wget --recursive --no-clobber --no-parent --exclude-directories /forums,/support http://example.com # Recursively download the site while skipping the /forums and /support directories
bash$ wget --spider --server-response http://example.com/file.iso # Find the size of a file without downloading it (look for Content-Length in the response headers; the value is in bytes)
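# wget prints those headers on stderr, so redirect them if you want to grep out the size (a sketch):
bash$ wget --spider --server-response http://example.com/file.iso 2>&1 | grep -i 'Content-Length'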
bash$ wget --output-document - --quiet google.com/humans.txt # Print the file to standard output with no progress output
bash$ wget --limit-rate=20k --wait=60 --random-wait --mirror example.com # Mirror politely: cap the rate at 20 KB/s and wait a randomized ~60 seconds between requests
bash$ wget -H -r --level=1 -k -p http://ginatrapani.googlepages.com # Retrieve all the pages of the site plus the pages it links to directly, even on other hosts
bash$ wget -Q5m -i FILE-WHICH-HAS-URLS # Stop retrieving new files once the total downloaded exceeds the 5 MB quota (the quota applies to recursive or -i downloads, not to a single file)