Ignored By Dinosaurs 🦕

varnish

This is an interweaving of Four Kitchens' Varnish 3 VCL and this generic Varnish 4 VCL.


vcl 4.0;
# Based on: https://github.com/mattiasgeniar/varnish-4.0-configuration-templates/blob/master/default.vcl

import std;
import directors;

backend server1 {
 # Single origin definition: the IP/hostname and port of the service
 # Varnish fetches from (e.g. Apache listening on :8080).
 .host = "127.0.0.1";
 .port = "8080";
 .max_connections = 300; # Cap on concurrent connections to this origin.

 # Timeouts for talking to the origin.
 .connect_timeout = 5s; # Max wait to establish a backend connection.
 .first_byte_timeout = 300s; # Max wait for the first response byte.
 .between_bytes_timeout = 2s; # Max gap allowed between response bytes.

 # Health probe: a HEAD / is issued every 5 seconds with a 1s timeout;
 # the backend counts as healthy when at least 3 of the last 5 probes
 # succeeded, otherwise it is marked sick.
 .probe = {
 #.url = "/"; # short easy way (GET /)
 # We prefer to only do a HEAD /
 .request =
 "HEAD / HTTP/1.1"
 "Host: localhost"
 "Connection: close"
 "User-Agent: Varnish Health Probe";

 .interval = 5s;
 .timeout = 1s;
 .window = 5;
 .threshold = 3;
 }
}

/* Disabled ACL of hosts allowed to send PURGE requests; enable together
   with the guard in vcl_recv. (Fixed: the comment delimiters were
   markdown-escaped as "/\*" and "\*/", which is invalid VCL.) */
/*acl purge {
 # ACL we'll use later to allow purges
 "localhost";
 "127.0.0.1";
 "::1";
}*/


/* Disabled ACL of hosts whose "Cache-Control: no-cache" forces a refresh;
   enable together with the commented check in vcl_recv. (Fixed: the
   comment delimiters were markdown-escaped as "/\*" and "\*/".) */
/*acl editors {
 # ACL to honor the "Cache-Control: no-cache" header to force a refresh but only from selected IPs
 "localhost";
 "127.0.0.1";
 "::1";
}*/

sub vcl_init {
 # Called when VCL is loaded, before any requests pass through it.
 # Typically used to initialize VMODs.

 # Build a round-robin director; vcl_recv points req.backend_hint at it,
 # so new origins only need an add_backend() call here.
 new vdir = directors.round_robin();
 vdir.add_backend(server1);
 # vdir.add_backend(server...);
 # vdir.add_backend(servern);
}

sub vcl_recv {
  # Entry point for every client request, after it has been fully received
  # and parsed. Decides whether and how to serve the request, normalizes
  # it, and picks the backend.
  #
  # Fixes applied: the markdown-escaped regexes (".\*" etc.) matched a
  # literal '*' instead of "any sequence"; restored to real PCRE (".*").
  # The disabled purge-ACL guard's comment delimiters were also repaired.

  # NOTE(review): ban_list is not defined anywhere in this file -- it must
  # come from an included VCL or loading this config will fail. Confirm.
  call ban_list;
  #set req.url = std.tolower(req.url);

  # Send all traffic to the round-robin director created in vcl_init.
  set req.backend_hint = vdir.backend();

  # Normalize the Host header: strip the port (in case you're testing
  # this on various TCP ports) so it doesn't fragment the cache.
  set req.http.Host = regsub(req.http.Host, ":[0-9]+", "");

  # Normalize the query arguments: sorting makes "?a=1&b=2" and
  # "?b=2&a=1" share a single cache object.
  set req.url = std.querysort(req.url);

  # Allow purging. The IP restriction is disabled; enable the acl purge
  # block above together with this guard to limit who may purge.
  if (req.method == "PURGE") {
    /* if (!std.ip(req.http.X-Forwarded-For, "0.0.0.0") ~ purge) { # purge is the ACL defined at the begining
      # Not from an allowed IP? Then die with an error.
      return (synth(405, "This IP - " + std.ip(req.http.X-Forwarded-For, "0.0.0.0") + " is not allowed to send PURGE requests."));
    } */
    # If you got to this stage (and didn't error out above), purge the cached result
    return (purge);
  }

  # Only deal with "normal" methods; pipe anything exotic straight through.
  if (req.method != "GET" &&
      req.method != "HEAD" &&
      req.method != "PUT" &&
      req.method != "POST" &&
      req.method != "TRACE" &&
      req.method != "OPTIONS" &&
      req.method != "PATCH" &&
      req.method != "DELETE") {
    return (pipe);
  }

  # Never cache Drupal admin/maintenance pages or AJAX callbacks.
  if (req.url ~ "^/status\.php$" ||
      req.url ~ "^/update\.php$" ||
      req.url ~ "^/admin$" ||
      req.url ~ "^/admin/.*$" ||
      req.url ~ "^/flag/.*$" ||
      req.url ~ "^.*/ajax/.*$" ||
      req.url ~ "^.*/ahah/.*$") {
    return (pass);
  }

  # Implementing websocket support (https://www.varnish-cache.org/docs/4.0/users-guide/vcl-example-websockets.html)
  if (req.http.Upgrade ~ "(?i)websocket") {
    return (pipe);
  }

  # Drupal's batch mode will behave in a funky manner since all cookies except
  # for the session get stripped out below. This makes batch fall into
  # op=do_nojs mode, which isn't really needed. Just get Varnish out of the way.
  if (req.url ~ "(^/batch)") {
    return (pipe);
  }

  # Only cache GET or HEAD requests. This makes sure POST requests are always passed.
  if (req.method != "GET" && req.method != "HEAD") {
    return (pass);
  }

  # Strip the fragment (hash); the server doesn't need it.
  if (req.url ~ "\#") {
    set req.url = regsub(req.url, "\#.*$", "");
  }

  # Strip a trailing ? if it exists
  if (req.url ~ "\?$") {
    set req.url = regsub(req.url, "\?$", "");
  }

  # Generic cookie sanitation: keep only Drupal session cookies and
  # NO_CACHE; drop everything else (has_js, analytics, ...).
  if (req.http.Cookie) {
    # 1. Append a semi-colon to the front of the cookie string.
    # 2. Remove all spaces that appear after semi-colons.
    # 3. Match the cookies we want to keep, adding back the space we
    #    removed previously. (\1) is the first matching group in the regsuball.
    # 4. Remove all other cookies, identifying them by the fact that they
    #    have no space after the preceding semi-colon.
    # 5. Remove all spaces and semi-colons from the beginning and end of
    #    the cookie string.
    set req.http.Cookie = ";" + req.http.Cookie;
    set req.http.Cookie = regsuball(req.http.Cookie, "; +", ";");
    set req.http.Cookie = regsuball(req.http.Cookie, ";(SESS[a-z0-9]+|SSESS[a-z0-9]+|NO_CACHE)=", "; \1=");
    set req.http.Cookie = regsuball(req.http.Cookie, ";[^ ][^;]*", "");
    set req.http.Cookie = regsuball(req.http.Cookie, "^[; ]+|[; ]+$", "");

    if (req.http.Cookie == "") {
      # No cookies left: remove the header entirely so Varnish's default
      # behavior is to cache the page.
      unset req.http.Cookie;
    } else {
      # A session or NO_CACHE cookie survived: do not cache, hand the
      # request to the backend directly.
      return (pass);
    }
  }

  # Honor shift-reload style refreshes by purging the object.
  if (req.http.Cache-Control ~ "(?i)no-cache") {
  #if (req.http.Cache-Control ~ "(?i)no-cache" && client.ip ~ editors) { # create the acl editors if you want to restrict the Ctrl-F5
    # http://varnish.projects.linpro.no/wiki/VCLExampleEnableForceRefresh
    # Ignore requests via proxy caches and badly behaved crawlers
    # like msnbot that send no-cache with every request.
    if (! (req.http.Via || req.http.User-Agent ~ "(?i)bot" || req.http.X-Purge)) {
      #set req.hash_always_miss = true; # Doesn't seems to refresh the object in the cache
      return (purge); # Couple this with restart in vcl_purge and X-Purge header to avoid loops
    }
  }

  # Large static files: strip cookies and cache. vcl_backend_response
  # enables streaming for these so end-users don't wait for Varnish to
  # read the whole file first.
  if (req.url ~ "^[^?]*\.(7z|avi|bz2|flac|flv|gz|mka|mkv|mov|mp3|mp4|mpeg|mpg|ogg|ogm|opus|rar|tar|tgz|tbz|txz|wav|webm|xz|zip)(\?.*)?$") {
    unset req.http.Cookie;
    return (hash);
  }

  # Remove all cookies for static files
  # A valid discussion could be held on this line: do you really need to cache static files that don't cause load? Only if you have memory left.
  # Sure, there's disk I/O, but chances are your OS will already have these files in their buffers (thus memory).
  # Before you blindly enable this, have a read here: https://ma.ttias.be/stop-caching-static-files/
  if (req.url ~ "^[^?]*\.(7z|avi|bmp|bz2|css|csv|doc|docx|eot|flac|flv|gif|gz|ico|jpeg|jpg|js|less|mka|mkv|mov|mp3|mp4|mpeg|mpg|odt|otf|ogg|ogm|opus|pdf|png|ppt|pptx|rar|rtf|svg|svgz|swf|tar|tbz|tgz|ttf|txt|txz|wav|webm|webp|woff|woff2|xls|xlsx|xml|xz|zip)(\?.*)?$") {
    unset req.http.Cookie;
    return (hash);
  }

  # Send Surrogate-Capability headers to announce ESI support to the backend.
  set req.http.Surrogate-Capability = "key=ESI/1.0";

  # Requests with authorization are not cacheable by default.
  if (req.http.Authorization) {
    return (pass);
  }

  return (hash);
}

sub vcl_pipe {
  # Pipe mode: Varnish degrades into a transparent TCP proxy, shuffling
  # bytes between client and backend until either side closes the
  # connection. No other VCL subroutine runs for this connection after
  # vcl_pipe.
  #
  # Only the first request on a piped connection carries X-Forwarded-For.
  # If you need it on every request, uncomment the Connection header
  # below -- but beware: it can break broken web applications, such as
  # IIS with NTLM authentication.
  # set bereq.http.Connection = "Close";

  # Forward the Upgrade header so websocket handshakes survive the pipe.
  # (https://www.varnish-cache.org/docs/4.0/users-guide/vcl-example-websockets.html)
  if (req.http.Upgrade) {
    set bereq.http.Upgrade = req.http.Upgrade;
  }

  return (pipe);
}

sub vcl_pass {
  # Pass mode: the request goes to the backend and the response is
  # relayed to the client without being stored in the cache. Subsequent
  # requests on the same client connection are handled normally.

  # return (pass);
}

# The data on which the hashing will take place
sub vcl_hash {
 # Called after vcl_recv to create a hash value for the request. This is used as a key
 # to look up the object in Varnish.
 # NOTE: the sequence of hash_data() calls IS the cache key -- reordering
 # them effectively invalidates the entire cache.

 hash_data(req.url);

 if (req.http.host) {
 hash_data(req.http.host);
 } else {
 # No Host header: fall back to the server IP so the key is still unique.
 hash_data(server.ip);
 }

 # hash cookies for requests that have them
 if (req.http.Cookie) {
 hash_data(req.http.Cookie);
 }
}

sub vcl_hit {
  # A cache lookup succeeded. Decide whether the object is fresh enough
  # to serve, or whether stale ("grace") content is acceptable.

  # Fresh object: a pure, unadulterated hit -- deliver it.
  if (obj.ttl >= 0s) {
    return (deliver);
  }

  # TTL has expired; look at the stale copies.
  # https://www.varnish-cache.org/docs/trunk/users-guide/vcl-grace.html
  # Varnish coalesces concurrent requests for the same page into a single
  # backend fetch. Under heavy load that waiting queue can grow large, and
  # releasing it all at once is a thundering-herd risk -- serving slightly
  # stale content instead keeps both the backend and the users happy.

  # if (!std.healthy(req.backend_hint) && (obj.ttl + obj.grace > 0s)) {
  # return (deliver);
  # } else {
  # return (miss);
  # }

  if (std.healthy(req.backend_hint)) {
    # Backend is healthy: tolerate at most 10s of staleness.
    if (obj.ttl + 10s > 0s) {
      #set req.http.grace = "normal(limited)";
      return (deliver);
    }
    # Too stale for a healthy backend -- fetch a fresh object.
    return (miss);
  }

  # Backend is sick: fall back to the full grace window.
  if (obj.ttl + obj.grace > 0s) {
    #set req.http.grace = "full";
    return (deliver);
  }

  # No graced object either.
  return (miss);
}

sub vcl_miss {
  # The lookup found nothing usable in the cache. Decide whether to go to
  # the backend for the document (and vcl_recv already picked which one).
  return (fetch);
}

# Handle the HTTP request coming from our backend
sub vcl_backend_response {
  # Runs once response headers have been retrieved from the backend.
  # Decides cacheability, TTL/grace, ESI processing, and streaming.
  #
  # Fixes applied: the markdown-escaped regexes ("[^?]\*", "(\?.\*)?")
  # matched a literal '*'; restored to real PCRE. Vary == "\*" compared
  # against a literal backslash-asterisk; it now matches "Vary: *".
  # set beresp.http.X-Backend = beresp.backend.name;

  # Parse ESI if the backend announced it, and strip Surrogate-Control
  # so it is not forwarded downstream.
  if (beresp.http.Surrogate-Control ~ "ESI/1.0") {
    unset beresp.http.Surrogate-Control;
    set beresp.do_esi = true;
  }

  # Enable cache for all static files by dropping any Set-Cookie.
  # The same argument as the static caches from above: monitor your cache size, if you get data nuked out of it, consider giving up the static file cache.
  # Before you blindly enable this, have a read here: https://ma.ttias.be/stop-caching-static-files/
  if (bereq.url ~ "^[^?]*\.(7z|avi|bmp|bz2|css|csv|doc|docx|eot|flac|flv|gif|gz|ico|jpeg|jpg|js|less|mka|mkv|mov|mp3|mp4|mpeg|mpg|odt|otf|ogg|ogm|opus|pdf|png|ppt|pptx|rar|rtf|svg|svgz|swf|tar|tbz|tgz|ttf|txt|txz|wav|webm|webp|woff|woff2|xls|xlsx|xml|xz|zip)(\?.*)?$") {
    unset beresp.http.set-cookie;
  }

  # Large static files are streamed to the end-user instead of making
  # them wait for Varnish to fully read the file first.
  if (bereq.url ~ "^[^?]*\.(7z|avi|bz2|flac|flv|gz|mka|mkv|mov|mp3|mp4|mpeg|mpg|ogg|ogm|opus|rar|tar|tgz|tbz|txz|wav|webm|xz|zip|csv)(\?.*)?$") {
    unset beresp.http.set-cookie;
    set beresp.do_stream = true; # Check memory usage: it'll grow in fetch_chunksize blocks (128k by default) if the backend doesn't send a Content-Length header, so only enable it for big objects
    set beresp.do_gzip = false; # Don't try to compress it for storage
  }

  # A 301/302 formed via Apache's mod_rewrite can leak the backend port
  # (e.g. :8080 when Varnish fronts on :80 on the same box); strip it.
  # To prevent accidental replacement, only 301/302 redirects are filtered.
  if (beresp.status == 301 || beresp.status == 302) {
    set beresp.http.Location = regsub(beresp.http.Location, ":[0-9]+", "");
  }

  # Uncacheable responses (no TTL, a Set-Cookie, or "Vary: *"): mark as
  # uncacheable for 2 minutes. Don't rely on this -- SET YOUR HEADERS in
  # the backend.
  if (beresp.ttl <= 0s || beresp.http.Set-Cookie || beresp.http.Vary == "*") {
    set beresp.ttl = 120s;
    set beresp.uncacheable = true;
    return (deliver);
  }

  # Don't cache 50x responses
  if (beresp.status == 500 || beresp.status == 502 || beresp.status == 503 || beresp.status == 504) {
    return (abandon);
  }

  # Allow stale content in case the backend goes down: keep all objects
  # for 6 hours beyond their TTL.
  set beresp.grace = 6h;

  return (deliver);
}

# The routine when we deliver the HTTP request to the user
# Last chance to modify headers that are sent to the client
sub vcl_deliver {
  # Final stop before the response leaves Varnish -- last chance to edit
  # the headers the client will see.

  # Debug aid: expose HIT/MISS. Disable when not needed.
  if (obj.hits > 0) {
    set resp.http.X-Cache = "HIT";
  } else {
    set resp.http.X-Cache = "MISS";
  }

  # obj.hits changed in 4.0: it counts per objecthead, not per object,
  # and may not be reset in some ban scenarios (see bug 1492), so take
  # this number with a grain of salt.
  set resp.http.X-Cache-Hits = obj.hits;

  # Strip headers that leak implementation details to the outside world.
  unset resp.http.X-Powered-By; # PHP version
  unset resp.http.Server; # Apache version & OS
  unset resp.http.X-Drupal-Cache;
  unset resp.http.X-Varnish;
  unset resp.http.Via;
  unset resp.http.Link;
  unset resp.http.X-Generator;

  return (deliver);
}

sub vcl_purge {
  # Reached after a purge completes. When we arrived here from the
  # no-cache refresh path in vcl_recv (client method was not an actual
  # PURGE), tag the request and restart it so the freshly purged object
  # is re-fetched; the X-Purge marker prevents a restart loop.
  if (req.method != "PURGE") {
    set req.http.X-Purge = "Yes";
    return (restart);
  }
}

sub vcl_synth {
  # Synthetic responses. Two custom status codes let VCL force redirects
  # from anywhere in vcl_recv:
  #   return (synth(720, "http://host/new.html")); -> 301 (permanent)
  #   return (synth(721, "http://host/new.html")); -> 302 (temporary)
  # The target URL travels in the synth reason string.
  if (resp.status == 720) {
    set resp.http.Location = resp.reason;
    set resp.status = 301;
    return (deliver);
  }

  if (resp.status == 721) {
    set resp.http.Location = resp.reason;
    set resp.status = 302;
    return (deliver);
  }

  return (deliver);
}


sub vcl_fini {
 # Called when VCL is discarded only after all requests have exited the VCL.
 # Typically used to clean up VMODs (the counterpart of vcl_init).

 return (ok);
}

#varnish

Prelude

Fastly is a CDN (content delivery network). A CDN makes your site faster by acting as a caching layer between the world wide web and your webserver. It does this by having a globally distributed network of servers and by using some DNS trickery to make sure that when someone puts in the address of your website they're actually requesting that page from the nearest server in that network.

$ host www.ecnmag.com
www.ecnmag.com is an alias for global.prod.fastly.net.
global.prod.fastly.net is an alias for global-ssl.fastly.net.
global-ssl.fastly.net is an alias for fallback.global-ssl.fastly.net.
fallback.global-ssl.fastly.net has address 23.235.39.184
fallback.global-ssl.fastly.net has address 199.27.76.185

If they don't have a copy of the requested page, they'll get it from your webserver and save it for the next time. Next time, they serve the “cached version”, which is way faster for your users and lightens the load on your webserver (since the request never even makes it to your webserver). Excellent writeup here.

There are many different CDN vendors out there – Akamai being the oldest and most expensive that you may have heard of. A new entrant into the market is a company called Fastly. Fastly has decided on using Varnish as the core of their system. They have some heavyweight Varnish talent on the team and have added a few extremely cool features to “vanilla” Varnish that I'll get to in a moment.

Fastly's being built on top of Varnish is cool, mainly because every CDN out there has some sort of configuration language and to throw your hat in with any of them is also to throw your hat in with their particular configuration language. Varnish has a well known config format called VCL (Varnish configuration language) which, on top of having plenty of documentation and users out there already, is also portable to other installations of Varnish so that learning it is time well spent. This is the killer Fastly feature that first drew me in.


(you can skip this – backstory, not technical)

Prior to using the CDN as our front line to the rest of the internet, we'd been on a traditional “n-tier” web setup. This meant that any request to one of our sites from anywhere in the world would have to travel to a single point – our load balancer in Ashburn, Virginia in this case – and then travel all the way back to wherever. In addition to this obvious global networking performance suck, we use a managed hosting vendor, so they actually own and control our load balancer. Any changes that we'd want to have made to our VCL – the front line of defense against the WWW – would have to go through a support-ticket-and-review process. This was a bottleneck in the event of DDos situations, or any change to our caching setup for any reason.

Taking control of our caching front line was a necessary step. This became the second killer Fastly feature once we started piloting a few of our sites on Fastly.


The killer-est killer feature of all has only just become clear to me. Fastly makes use of a feature called “Surrogate Keys” to improve the typical time-based expiration strategy that we'd been using for years now. They have a wonderful pair of blog posts on the topic here and here.

The way that Varnish works is basically a big, fast key-value store. How keys are generated and subsequently looked up, as well as how their values are stored are all subject to alteration by VCL, so you have a wonderful amount of control over the default methodology. By default it's essentially URLs as keys, and server responses as values, and this will get you pretty far down the line, but where you bump into the limits is as soon as you start pondering that each response has but one key that references it. Conversely, each key references only one object. By default...

Real life example – I work for a publishing company. Our websites are not super complicated IA-wise. We have pieces of content and listing pages of that content, organized mostly by some sort of topic. A piece of content can have any number of topics attached to it, and that piece of content (heretofore referred to as a “node”) should show up on the listing pages for any one of those terms.

Out of the box, Fastly/Drupal works really well for individual nodes. Drupal has a module for Fastly that communicates with their API to purge content when it's updated, so if an editor changes something on the node they won't have to wait at all for their changes to be reflected to unauthenticated users. The same is not true for listing pages. Since these pages are collections of content and have no deeper awareness of the individual members of the collection, they function on a typical time-based expiration strategy.

My strategy for the months since we launched this across all of our sites has been to set TTLs (time to live, basically the length of time something will be cached) as high as I can until an editor complains that content isn't showing up where they want it to. I recently had an editor start riding me about this, so I lowered the TTLs to values so low that I knew we weren't getting much benefit of even having caching in the first place. I'd known about this Surrogate Key feature and decided to start having a deeper look.


The ideal caching scenario would have not only the node purged when editors updated it, but to have listing pages purge when a piece of content is published that should show up on that listing. This is where Surrogate Keys come into play. The Surrogate-Key HTTP header is a “space delimited collection of cache keys that pertain to an object in the cache”. If a purge request is sent to Fastly's API to purge “test-key”, anything with “test-key” in the Surrogate-Key header should fall out of cache and be regenerated.

In essence, what this means is that you can associate an arbitrary key with more than one object in the cache. You could tag anything on the route “api/mobile” with a surrogate-key “mobile” and when you want to purge your mobile endpoints, purge them all with one call rather than having to loop through every endpoint individually. On those topic listing pages you could use the topic or topic ID as a surrogate-key, and then any time a piece of content with that topic is added or updated, you can send a purge to that topic ID and have that listing page dropped. And only that listing page dropped.

// the basic algorithm, NOT functional Drupal code

if ($listing_page->type == "topic") {
	// Collect every topic ID whose content updates should invalidate
	// this listing page.
	$keys = [];
	
	// Topics can have children, so fetch them.
	// pretend this returns a perfect array of topic IDs
	$topics = get_term_children($listing_page->topic);
	// Push the parent topic into the array as well.
	$topics[] = $listing_page->topic;
	
	foreach($topics as $topic) {
	$keys[] = $topic;
	}
	
	// Surrogate-Key is a space-delimited list of cache keys.
	$key = implode(" ", $keys);
	add_http_header('Surrogate-Key', $key);
}

This results in a topic listing page getting a header like this -

# the parent topic ID as well as any child topic IDs
Surrogate-Key: 3979 3980 3779

Then, upon the update or creation of a node you do something like this -

// this would be something like hook_node_insert if you're a Drupalist
function example_node_save_subscriber($node) {
 // Client for the Fastly purge API.
 $fastly = new Fastly(FASTLY_SERVICE_ID, API_KEY);
 // One purge call per topic: evicts every cached object whose
 // Surrogate-Key header contains that topic ID (the listing pages).
 foreach($node->topics as $topic_id) {
 $fastly->purgeKey($topic_id);
 }
}

This fires off a Fastly API call for each topic on that node that would cause anything with that surrogate key, aka topic ID, to be purged. This would be any topic listing page with this topic ID on it. Obviously if there are 500 topics on any piece of content you'll probably want to move this to a background job so you don't kill something, but you get the idea.


This is sort of like chasing the holy grail of caching. In theory this means that you are turning the caching TTLs up to maximum and only expiring something when it actually needs to be expired based on user action and intent, not based on some arbitrary time that I decide on based on my lust for having everything as fast as possible. The marvelous side effect of this is that (again in theory) everything should load even faster since there's almost no superfluous generation of pages at all.

I just released the code on Friday morning, and the editor who was previously riding me about this topic had only positive feedback for me, meaning – so far, so good.


FYI – the holy grail actually looks more like this -

#braindump #varnish #drupal #devops

I screwed this one up pretty bad when I first got started with Fastly. They have a link that says “VCL”, as if you should download that to get started. So I did. But that's the generated VCL, and all their special macros are already blown up, so you don't want to copy that one.

And the one on their website has HTML entities encoded, so I'm putting this one here in case I need it again later.

The original

sub vcl_recv {
#FASTLY recv
  # NOTE: Fastly VCL uses Varnish 2.1-era syntax -- req.request (not
  # req.method) and return(lookup) (not return(hash)). The #FASTLY line
  # is a macro marker Fastly expands; keep it intact.

  # Only GET/HEAD (plus Fastly's purge pseudo-method) may hit the cache;
  # everything else goes straight to the backend.
  if (req.request != "HEAD" && req.request != "GET" && req.request != "FASTLYPURGE") {
    return(pass);
  }

  return(lookup);
  }

sub vcl_fetch {
#FASTLY fetch

 # Retry a failed GET/HEAD once (via restart) before giving up.
 if ((beresp.status == 500 || beresp.status == 503) && req.restarts < 1 && (req.request == "GET" || req.request == "HEAD")) {
 restart;
 }

 # Expose the restart count for debugging.
 if(req.restarts > 0 ) {
 set beresp.http.Fastly-Restarts = req.restarts;
 }

 # Responses that set cookies are per-user: never cache them.
 if (beresp.http.Set-Cookie) {
 set req.http.Fastly-Cachetype = "SETCOOKIE";
 return (pass);
 }

 # Honor Cache-Control: private.
 if (beresp.http.Cache-Control ~ "private") {
 set req.http.Fastly-Cachetype = "PRIVATE";
 return (pass);
 }

 # Cache backend errors only very briefly (1s TTL + 5s grace) to absorb
 # request storms without pinning a stale error.
 if (beresp.status == 500 || beresp.status == 503) {
 set req.http.Fastly-Cachetype = "ERROR";
 set beresp.ttl = 1s;
 set beresp.grace = 5s;
 return (deliver);
 }

 # Respect backend-supplied freshness headers if present...
 if (beresp.http.Expires || beresp.http.Surrogate-Control ~ "max-age" || beresp.http.Cache-Control ~"(s-maxage|max-age)") {
 # keep the ttl here
 } else {
 # ...otherwise apply the default ttl of one hour
 set beresp.ttl = 3600s;
 }

 return(deliver);
}

sub vcl_hit {
#FASTLY hit

 # Hit-for-pass objects are marked uncacheable; send those to the backend.
 if (!obj.cacheable) {
 return(pass);
 }
 return(deliver);
}

sub vcl_miss {
#FASTLY miss
 # Not in cache: fetch from the origin.
 return(fetch);
}

sub vcl_deliver {
#FASTLY deliver
 # No custom delivery logic; hand the response to the client as-is.
 return(deliver);
}

sub vcl_error {
#FASTLY error
 # Intentionally empty: presumably Fastly's macro expansion supplies the
 # default error handling here -- confirm against Fastly's VCL docs.
}

sub vcl_pass {
#FASTLY pass
 # Intentionally empty apart from the #FASTLY macro expansion point.
}

#varnish