This is true only for ancient *NIX kernels. Modern kernels use the same technique as NT with caching backed by file mapping structures.
For example, below is a call stack from my test machine running the Linux 4.12.2 kernel, captured when the ext4 read operation ext4_file_read_iter called the “Linux cache manager” (do_generic_file_read -> page_cache_sync_readahead) to bring data into the cache backed by mapped-file structures (struct address_space) while processing the read() system call.
This resulted in a recursive call through mapping->a_ops->readpages into the file system’s ext4_readpages. This is the analogue of a cached read in NT. Mac OS X uses the same caching-by-file-mapping technique, borrowed from BSD.
Thread 2 hit Breakpoint 9, ext4_readpages (file=0xffff88001d59b300, mapping=0xffff88001d1d56c0, pages=0xffffc90000817c30, nr_pages=1) at …/fs/ext4/inode.c:3308
3308 WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
(gdb) bt
#0 ext4_readpages (file=0xffff88001d59b300, mapping=0xffff88001d1d56c0, pages=0xffffc90000817c30, nr_pages=1) at …/fs/ext4/inode.c:3308
#1 0xffffffff811b6288 in read_pages (gfp=, nr_pages=, pages=, filp=, mapping=) at …/mm/readahead.c:121
#2 __do_page_cache_readahead (mapping=, filp=, offset=1, nr_to_read=, lookahead_size=) at …/mm/readahead.c:199
#3 0xffffffff811b64b8 in ra_submit (ra=, ra=, ra=, filp=, mapping=) at …/mm/internal.h:66
#4 ondemand_readahead (mapping=0xffff88001d1d56c0, ra=0xffff88001d59b398, filp=0xffff88001d59b300, hit_readahead_marker=, offset=0, req_size=) at …/mm/readahead.c:478
#5 0xffffffff811b678e in page_cache_sync_readahead (mapping=, ra=, filp=, offset=, req_size=) at …/mm/readahead.c:510
#6 0xffffffff811a7a62 in do_generic_file_read (written=, iter=, ppos=, filp=) at …/mm/filemap.c:1813
#7 generic_file_read_iter (iocb=0x20000, iter=) at …/mm/filemap.c:2069
#8 0xffffffff812d1386 in ext4_file_read_iter (iocb=0xffff88001d59b300, to=0xffff88001d1d56c0) at …/fs/ext4/file.c:70
#9 0xffffffff81237680 in call_read_iter (file=, iter=, kio=) at …/include/linux/fs.h:1728
#10 new_sync_read (ppos=, len=, buf=, filp=) at …/fs/read_write.c:440
#11 __vfs_read (file=0xffff88001d59b300, buf=, count=, pos=0xffffc90000817f18) at …/fs/read_write.c:452
#12 0xffffffff81237cc3 in vfs_read (file=0xffff88001d59b300, buf=0x7fb92a0cb000 <error: cannot access memory at address>, count=, pos=0xffffc90000817f18)
at …/fs/read_write.c:473
#13 0xffffffff81239385 in SYSC_read (count=, buf=, fd=) at …/fs/read_write.c:589
#14 SyS_read (fd=, buf=140433251151872, count=131072) at …/fs/read_write.c:582
#15 0xffffffff818aaffb in entry_SYSCALL_64 () at …/arch/x86/entry/entry_64.S:203
(gdb) f 4
#4 ondemand_readahead (mapping=0xffff88001d1d56c0, ra=0xffff88001d59b398, filp=0xffff88001d59b300, hit_readahead_marker=, offset=0, req_size=) at …/mm/readahead.c:478
478 return ra_submit(ra, mapping, filp);
(gdb) p/x mapping
$13 = 0xffff88001d1d56c0
(gdb) p/x *mapping
$14 = {host = 0xffff88001d1d5548, page_tree = {gfp_mask = 0x1180020, rnode = 0x0}, tree_lock = {{rlock = {raw_lock = {val = {counter = 0x0}}}}}, i_mmap_writable = {counter = 0x0}, i_mmap = {
rb_node = 0x0}, i_mmap_rwsem = {count = {counter = 0x0}, wait_list = {next = 0xffff88001d1d56f0, prev = 0xffff88001d1d56f0}, wait_lock = {raw_lock = {val = {counter = 0x0}}}, osq = {tail = {
counter = 0x0}}, owner = 0x0}, nrpages = 0x0, nrexceptional = 0x0, writeback_index = 0x0, a_ops = 0xffffffff81a3a680, flags = 0x0, private_lock = {{rlock = {raw_lock = {val = {
counter = 0x0}}}}}, gfp_mask = 0x14200ca, private_list = {next = 0xffff88001d1d5740, prev = 0xffff88001d1d5740}, private_data = 0x0}
(gdb) ptype mapping
type = struct address_space {
struct inode *host;
struct radix_tree_root page_tree;
spinlock_t tree_lock;
atomic_t i_mmap_writable;
struct rb_root i_mmap;
struct rw_semaphore i_mmap_rwsem;
unsigned long nrpages;
unsigned long nrexceptional;
unsigned long writeback_index;
const struct address_space_operations *a_ops;
unsigned long flags;
spinlock_t private_lock;
gfp_t gfp_mask;
struct list_head private_list;
void *private_data;
} *