How can BeautifulSoup scrape the pages inside this list of hyperlinks?

A simple way would be to make an initial request and extract all the links in the second column of the table.

Then loop over those links, make a request for each, and continue with your existing code, except also handle the case where no table is present.

import csv
import requests
from bs4 import BeautifulSoup as bs

with requests.Session() as s:
    # Send a browser-like User-Agent with every request in the session
    s.headers = {"User-Agent": "Safari/537.36"}
    r = s.get('https://bitinfocharts.com/top-100-richest-dogecoin-addresses-3.html')
    soup = bs(r.content, 'lxml')
    # The address links sit in the second column of the rankings table
    address_links = [i['href'] for i in soup.select('.table td:nth-child(2) > a')]
    
    for url in address_links:
        r = s.get(url)
        soup = bs(r.content, 'lxml')
        # Each address page holds its data in a table with id "table_maina"
        table = soup.find(id="table_maina")
        
        if table:
            # Build the filename from the page heading, dropping the
            # "Dogecoin Address" wording and any surrounding whitespace
            item = soup.find('h1').text
            finalitem = item.replace('Dogecoin', '').replace('Address', '').strip()

            # Reset per address so rows from one page don't leak into the next CSV
            headers = []
            datarows = []
            for row in table.find_all('tr'):
                heads = row.find_all('th')
                if heads:
                    headers = [th.text for th in heads]
                else:
                    datarows.append([td.text for td in row.find_all('td')])

            # Use a context manager so each CSV file is flushed and closed
            with open(f'{finalitem}.csv', 'w', newline='') as f:
                fcsv = csv.writer(f)
                fcsv.writerow(headers)
                fcsv.writerows(datarows)
        else:
            print('no table for:', url)
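
One thing worth adding: the loop fires one request per address in quick succession. A short pause between requests is kinder to the server and makes a temporary block less likely. A minimal sketch of the same loop with a delay and a basic status check; the one-second pause is an arbitrary choice on my part, not something the site documents:

import time

for url in address_links:
    r = s.get(url)
    if r.status_code != 200:
        # Skip pages that didn't load rather than parsing an error page
        print('request failed for:', url, r.status_code)
        continue
    # ... parse the page and write the CSV exactly as above ...
    time.sleep(1)  # pause between requests; tune as needed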
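
As an aside, the URL you start from ends in -3.html, i.e. it is just one page of the richest-address list. If you want the links from every page, you can collect them the same way before the loop. A minimal sketch, assuming the usual pattern where the first page has no numeric suffix and later pages append -2, -3, and so on; the page count in the range is an assumption, so confirm both in the browser:

base = 'https://bitinfocharts.com/top-100-richest-dogecoin-addresses'
# Page 1 has no suffix (assumed); adjust the range to the number of pages you see
page_urls = [f'{base}.html'] + [f'{base}-{n}.html' for n in range(2, 6)]

address_links = []

for page_url in page_urls:
    r = s.get(page_url)
    soup = bs(r.content, 'lxml')
    # Same second-column selector as above, accumulated across pages
    address_links += [i['href'] for i in soup.select('.table td:nth-child(2) > a')]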