Start reordering the methods in headers in net/.

This patch also starts reordering some of the .cc files to match their
headers. More of both cleanups will be done in future patches.

BUG=68682
TEST=compiles

Review URL: http://codereview.chromium.org/6085013

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@70799 0039d316-1c4b-4281-b951-d872f2087c98
diff --git a/net/base/cert_database.h b/net/base/cert_database.h
index 7915cc6..5ffb6a25 100644
--- a/net/base/cert_database.h
+++ b/net/base/cert_database.h
@@ -29,6 +29,17 @@
 
 class CertDatabase {
  public:
+  // Stores per-certificate error codes for import failures.
+  struct ImportCertFailure {
+   public:
+    ImportCertFailure(X509Certificate* cert, int err);
+    ~ImportCertFailure();
+
+    scoped_refptr<X509Certificate> certificate;
+    int net_error;
+  };
+  typedef std::vector<ImportCertFailure> ImportCertFailureList;
+
   // Constants that define which usages a certificate is trusted for.
   // They are used in combination with CertType to specify trust for each type
   // of certificate.
@@ -45,17 +56,6 @@
     TRUSTED_OBJ_SIGN = 1 << 2,
   };
 
-  // Stores per-certificate error codes for import failures.
-  struct ImportCertFailure {
-   public:
-    ImportCertFailure(X509Certificate* cert, int err);
-    ~ImportCertFailure();
-
-    scoped_refptr<X509Certificate> certificate;
-    int net_error;
-  };
-  typedef std::vector<ImportCertFailure> ImportCertFailureList;
-
   CertDatabase();
 
   // Check whether this is a valid user cert that we have the private key for.
diff --git a/net/base/gzip_header.h b/net/base/gzip_header.h
index 81f6f19..c245de2c 100644
--- a/net/base/gzip_header.h
+++ b/net/base/gzip_header.h
@@ -20,9 +20,16 @@
 
 class GZipHeader {
  public:
+  enum Status {
+    INCOMPLETE_HEADER,    // don't have all the bits yet...
+    COMPLETE_HEADER,      // complete, valid header
+    INVALID_HEADER,       // found something invalid in the header
+  };
+
   GZipHeader() {
     Reset();
   }
+
   ~GZipHeader() {
   }
 
@@ -33,12 +40,6 @@
     extra_length_ = 0;
   }
 
-  enum Status {
-    INCOMPLETE_HEADER,    // don't have all the bits yet...
-    COMPLETE_HEADER,      // complete, valid header
-    INVALID_HEADER,       // found something invalid in the header
-  };
-
   // Attempt to parse the given buffer as the next installment of
   // bytes from a gzip header. If the bytes we've seen so far do not
   // yet constitute a complete gzip header, return
@@ -49,9 +50,6 @@
   Status ReadMore(const char* inbuf, int inbuf_len,
                   const char** header_end);
  private:
-
-  static const uint8 magic[];  // gzip magic header
-
   enum {                       // flags (see RFC)
     FLAG_FTEXT        = 0x01,  // bit 0 set: file probably ascii text
     FLAG_FHCRC        = 0x02,  // bit 1 set: header CRC present
@@ -88,6 +86,8 @@
     IN_DONE,
   };
 
+  static const uint8 magic[];  // gzip magic header
+
   int    state_;  // our current State in the parsing FSM: an int so we can ++
   uint8  flags_;         // the flags byte of the header ("FLG" in the RFC)
   uint16 extra_length_;  // how much of the "extra field" we have yet to read
diff --git a/net/base/mock_host_resolver.h b/net/base/mock_host_resolver.h
index 2972c847..3b1e36a 100644
--- a/net/base/mock_host_resolver.h
+++ b/net/base/mock_host_resolver.h
@@ -41,16 +41,6 @@
  public:
   virtual ~MockHostResolverBase();
 
-  // HostResolver methods:
-  virtual int Resolve(const RequestInfo& info,
-                      AddressList* addresses,
-                      CompletionCallback* callback,
-                      RequestHandle* out_req,
-                      const BoundNetLog& net_log);
-  virtual void CancelRequest(RequestHandle req);
-  virtual void AddObserver(Observer* observer);
-  virtual void RemoveObserver(Observer* observer);
-
   RuleBasedHostResolverProc* rules() { return rules_; }
 
   // Controls whether resolutions complete synchronously or asynchronously.
@@ -68,6 +58,16 @@
         pool_index, max_outstanding_jobs, max_pending_requests);
   }
 
+  // HostResolver methods:
+  virtual int Resolve(const RequestInfo& info,
+                      AddressList* addresses,
+                      CompletionCallback* callback,
+                      RequestHandle* out_req,
+                      const BoundNetLog& net_log);
+  virtual void CancelRequest(RequestHandle req);
+  virtual void AddObserver(Observer* observer);
+  virtual void RemoveObserver(Observer* observer);
+
  protected:
   MockHostResolverBase(bool use_caching);
 
@@ -143,11 +143,11 @@
                       int* os_error);
 
  private:
-  ~RuleBasedHostResolverProc();
-
   struct Rule;
   typedef std::list<Rule> RuleList;
 
+  ~RuleBasedHostResolverProc();
+
   RuleList rules_;
 };
 
diff --git a/net/base/registry_controlled_domain.h b/net/base/registry_controlled_domain.h
index 7586c12..90a1b8f 100644
--- a/net/base/registry_controlled_domain.h
+++ b/net/base/registry_controlled_domain.h
@@ -204,6 +204,8 @@
   static RegistryControlledDomainService* GetInstance();
 
  protected:
+  typedef const struct DomainRule* (*FindDomainPtr)(const char *, unsigned int);
+
   // The entire protected API is only for unit testing.  I mean it.  Don't make
   // me come over there!
   RegistryControlledDomainService();
@@ -216,8 +218,6 @@
   static RegistryControlledDomainService* SetInstance(
       RegistryControlledDomainService* instance);
 
-  typedef const struct DomainRule* (*FindDomainPtr)(const char *, unsigned int);
-
   // Used for unit tests, so that a different perfect hash map from the full
   // list is used.
   static void UseFindDomainFunction(FindDomainPtr function);
diff --git a/net/base/sdch_manager.cc b/net/base/sdch_manager.cc
index 5aca923..18976c9 100644
--- a/net/base/sdch_manager.cc
+++ b/net/base/sdch_manager.cc
@@ -26,41 +26,176 @@
 // static
 SdchManager* SdchManager::global_;
 
+//------------------------------------------------------------------------------
+SdchManager::Dictionary::Dictionary(const std::string& dictionary_text,
+    size_t offset, const std::string& client_hash, const GURL& gurl,
+    const std::string& domain, const std::string& path, const Time& expiration,
+    const std::set<int> ports)
+      : text_(dictionary_text, offset),
+        client_hash_(client_hash),
+        url_(gurl),
+        domain_(domain),
+        path_(path),
+        expiration_(expiration),
+        ports_(ports) {
+}
+
+SdchManager::Dictionary::~Dictionary() {
+}
+
+bool SdchManager::Dictionary::CanAdvertise(const GURL& target_url) {
+  if (!SdchManager::Global()->IsInSupportedDomain(target_url))
+    return false;
+  /* The specific rules of when a dictionary should be advertised in an
+     Avail-Dictionary header are modeled after the rules for cookie scoping. The
+     terms "domain-match" and "pathmatch" are defined in RFC 2965 [6]. A
+     dictionary may be advertised in the Avail-Dictionaries header exactly when
+     all of the following are true:
+      1. The server's effective host name domain-matches the Domain attribute of
+         the dictionary.
+      2. If the dictionary has a Port attribute, the request port is one of the
+         ports listed in the Port attribute.
+      3. The request URI path-matches the path header of the dictionary.
+      4. The request is not an HTTPS request.
+    */
+  if (!DomainMatch(target_url, domain_))
+    return false;
+  if (!ports_.empty() && 0 == ports_.count(target_url.EffectiveIntPort()))
+    return false;
+  if (path_.size() && !PathMatch(target_url.path(), path_))
+    return false;
+  if (target_url.SchemeIsSecure())
+    return false;
+  if (Time::Now() > expiration_)
+    return false;
+  return true;
+}
+
+//------------------------------------------------------------------------------
+// Security functions restricting loads and use of dictionaries.
+
 // static
-SdchManager* SdchManager::Global() {
-  return global_;
+bool SdchManager::Dictionary::CanSet(const std::string& domain,
+                                     const std::string& path,
+                                     const std::set<int> ports,
+                                     const GURL& dictionary_url) {
+  if (!SdchManager::Global()->IsInSupportedDomain(dictionary_url))
+    return false;
+  /*
+  A dictionary is invalid and must not be stored if any of the following are
+  true:
+    1. The dictionary has no Domain attribute.
+    2. The effective host name that derives from the referer URL host name does
+      not domain-match the Domain attribute.
+    3. The Domain attribute is a top level domain.
+    4. The referer URL host is a host domain name (not IP address) and has the
+      form HD, where D is the value of the Domain attribute, and H is a string
+      that contains one or more dots.
+    5. If the dictionary has a Port attribute and the referer URL's port was not
+      in the list.
+  */
+
+  // TODO(jar): Redirects in dictionary fetches might plausibly be problematic,
+  // and hence the conservative approach is to not allow any redirects (if there
+  // were any... then don't allow the dictionary to be set).
+
+  if (domain.empty()) {
+    SdchErrorRecovery(DICTIONARY_MISSING_DOMAIN_SPECIFIER);
+    return false;  // Domain is required.
+  }
+  if (net::RegistryControlledDomainService::GetDomainAndRegistry(domain).size()
+      == 0) {
+    SdchErrorRecovery(DICTIONARY_SPECIFIES_TOP_LEVEL_DOMAIN);
+    return false;  // domain was a TLD.
+  }
+  if (!Dictionary::DomainMatch(dictionary_url, domain)) {
+    SdchErrorRecovery(DICTIONARY_DOMAIN_NOT_MATCHING_SOURCE_URL);
+    return false;
+  }
+
+  std::string referrer_url_host = dictionary_url.host();
+  size_t postfix_domain_index = referrer_url_host.rfind(domain);
+  // See if it is indeed a postfix, or just an internal string.
+  if (referrer_url_host.size() == postfix_domain_index + domain.size()) {
+    // It is a postfix... so check to see if there's a dot in the prefix.
+    size_t end_of_host_index = referrer_url_host.find_first_of('.');
+    if (referrer_url_host.npos != end_of_host_index  &&
+        end_of_host_index < postfix_domain_index) {
+      SdchErrorRecovery(DICTIONARY_REFERER_URL_HAS_DOT_IN_PREFIX);
+      return false;
+    }
+  }
+
+  if (!ports.empty()
+      && 0 == ports.count(dictionary_url.EffectiveIntPort())) {
+    SdchErrorRecovery(DICTIONARY_PORT_NOT_MATCHING_SOURCE_URL);
+    return false;
+  }
+  return true;
 }
 
 // static
-void SdchManager::SdchErrorRecovery(ProblemCodes problem) {
-  UMA_HISTOGRAM_ENUMERATION("Sdch3.ProblemCodes_4", problem, MAX_PROBLEM_CODE);
+bool SdchManager::Dictionary::CanUse(const GURL& referring_url) {
+  if (!SdchManager::Global()->IsInSupportedDomain(referring_url))
+    return false;
+  /*
+    1. The request URL's host name domain-matches the Domain attribute of the
+      dictionary.
+    2. If the dictionary has a Port attribute, the request port is one of the
+      ports listed in the Port attribute.
+    3. The request URL path-matches the path attribute of the dictionary.
+    4. The request is not an HTTPS request.
+*/
+  if (!DomainMatch(referring_url, domain_)) {
+    SdchErrorRecovery(DICTIONARY_FOUND_HAS_WRONG_DOMAIN);
+    return false;
+  }
+  if (!ports_.empty()
+      && 0 == ports_.count(referring_url.EffectiveIntPort())) {
+    SdchErrorRecovery(DICTIONARY_FOUND_HAS_WRONG_PORT_LIST);
+    return false;
+  }
+  if (path_.size() && !PathMatch(referring_url.path(), path_)) {
+    SdchErrorRecovery(DICTIONARY_FOUND_HAS_WRONG_PATH);
+    return false;
+  }
+  if (referring_url.SchemeIsSecure()) {
+    SdchErrorRecovery(DICTIONARY_FOUND_HAS_WRONG_SCHEME);
+    return false;
+  }
+
+  // TODO(jar): Remove overly restrictive failsafe test (added per security
+  // review) when we have a need to be more general.
+  if (!referring_url.SchemeIs("http")) {
+    SdchErrorRecovery(ATTEMPT_TO_DECODE_NON_HTTP_DATA);
+    return false;
+  }
+
+  return true;
+}
+
+bool SdchManager::Dictionary::PathMatch(const std::string& path,
+                                        const std::string& restriction) {
+  /*  Must be either:
+  1. P2 is equal to P1
+  2. P2 is a prefix of P1 and either the final character in P2 is "/" or the
+      character following P2 in P1 is "/".
+      */
+  if (path == restriction)
+    return true;
+  size_t prefix_length = restriction.size();
+  if (prefix_length > path.size())
+    return false;  // Can't be a prefix.
+  if (0 != path.compare(0, prefix_length, restriction))
+    return false;
+  return restriction[prefix_length - 1] == '/' || path[prefix_length] == '/';
 }
 
 // static
-void SdchManager::ClearBlacklistings() {
-  Global()->blacklisted_domains_.clear();
-  Global()->exponential_blacklist_count.clear();
-}
-
-// static
-void SdchManager::ClearDomainBlacklisting(const std::string& domain) {
-  Global()->blacklisted_domains_.erase(StringToLowerASCII(domain));
-}
-
-// static
-int SdchManager::BlackListDomainCount(const std::string& domain) {
-  if (Global()->blacklisted_domains_.end() ==
-      Global()->blacklisted_domains_.find(domain))
-    return 0;
-  return Global()->blacklisted_domains_[StringToLowerASCII(domain)];
-}
-
-// static
-int SdchManager::BlacklistDomainExponential(const std::string& domain) {
-  if (Global()->exponential_blacklist_count.end() ==
-      Global()->exponential_blacklist_count.find(domain))
-    return 0;
-  return Global()->exponential_blacklist_count[StringToLowerASCII(domain)];
+bool SdchManager::Dictionary::DomainMatch(const GURL& gurl,
+                                          const std::string& restriction) {
+  // TODO(jar): This is not precisely a domain match definition.
+  return gurl.DomainIs(restriction.data(), restriction.size());
 }
 
 //------------------------------------------------------------------------------
@@ -87,6 +222,22 @@
 }
 
 // static
+SdchManager* SdchManager::Global() {
+  return global_;
+}
+
+// static
+void SdchManager::SdchErrorRecovery(ProblemCodes problem) {
+  UMA_HISTOGRAM_ENUMERATION("Sdch3.ProblemCodes_4", problem, MAX_PROBLEM_CODE);
+}
+
+void SdchManager::EnableSdchSupport(const std::string& domain) {
+  // We presume that there is a SDCH manager instance.
+  global_->supported_domain_ = domain;
+  global_->sdch_enabled_ = true;
+}
+
+// static
 void SdchManager::BlacklistDomain(const GURL& url) {
   if (!global_ )
     return;
@@ -117,10 +268,31 @@
   global_->blacklisted_domains_[domain] = INT_MAX;
 }
 
-void SdchManager::EnableSdchSupport(const std::string& domain) {
-  // We presume that there is a SDCH manager instance.
-  global_->supported_domain_ = domain;
-  global_->sdch_enabled_ = true;
+// static
+void SdchManager::ClearBlacklistings() {
+  Global()->blacklisted_domains_.clear();
+  Global()->exponential_blacklist_count.clear();
+}
+
+// static
+void SdchManager::ClearDomainBlacklisting(const std::string& domain) {
+  Global()->blacklisted_domains_.erase(StringToLowerASCII(domain));
+}
+
+// static
+int SdchManager::BlackListDomainCount(const std::string& domain) {
+  if (Global()->blacklisted_domains_.end() ==
+      Global()->blacklisted_domains_.find(domain))
+    return 0;
+  return Global()->blacklisted_domains_[StringToLowerASCII(domain)];
+}
+
+// static
+int SdchManager::BlacklistDomainExponential(const std::string& domain) {
+  if (Global()->exponential_blacklist_count.end() ==
+      Global()->exponential_blacklist_count.find(domain))
+    return 0;
+  return Global()->exponential_blacklist_count[StringToLowerASCII(domain)];
 }
 
 bool SdchManager::IsInSupportedDomain(const GURL& url) {
@@ -147,6 +319,13 @@
   return false;
 }
 
+void SdchManager::FetchDictionary(const GURL& request_url,
+                                  const GURL& dictionary_url) {
+  if (SdchManager::Global()->CanFetchDictionary(request_url, dictionary_url) &&
+      fetcher_.get())
+    fetcher_->Schedule(dictionary_url);
+}
+
 bool SdchManager::CanFetchDictionary(const GURL& referring_url,
                                      const GURL& dictionary_url) const {
   /* The user agent may retrieve a dictionary from the dictionary URL if all of
@@ -179,13 +358,6 @@
   return true;
 }
 
-void SdchManager::FetchDictionary(const GURL& request_url,
-                                  const GURL& dictionary_url) {
-  if (SdchManager::Global()->CanFetchDictionary(request_url, dictionary_url) &&
-      fetcher_.get())
-    fetcher_->Schedule(dictionary_url);
-}
-
 bool SdchManager::AddSdchDictionary(const std::string& dictionary_text,
     const GURL& dictionary_url) {
   std::string client_hash;
@@ -317,22 +489,6 @@
     UMA_HISTOGRAM_COUNTS("Sdch3.Advertisement_Count", count);
 }
 
-SdchManager::Dictionary::Dictionary(const std::string& dictionary_text,
-    size_t offset, const std::string& client_hash, const GURL& gurl,
-    const std::string& domain, const std::string& path, const Time& expiration,
-    const std::set<int> ports)
-      : text_(dictionary_text, offset),
-        client_hash_(client_hash),
-        url_(gurl),
-        domain_(domain),
-        path_(path),
-        expiration_(expiration),
-        ports_(ports) {
-}
-
-SdchManager::Dictionary::~Dictionary() {
-}
-
 // static
 void SdchManager::GenerateHash(const std::string& dictionary_text,
     std::string* client_hash, std::string* server_hash) {
@@ -348,181 +504,6 @@
   DCHECK_EQ(client_hash->length(), 8u);
 }
 
-// static
-void SdchManager::UrlSafeBase64Encode(const std::string& input,
-                                      std::string* output) {
-  // Since this is only done during a dictionary load, and hashes are only 8
-  // characters, we just do the simple fixup, rather than rewriting the encoder.
-  base::Base64Encode(input, output);
-  for (size_t i = 0; i < output->size(); ++i) {
-    switch (output->data()[i]) {
-      case '+':
-        (*output)[i] = '-';
-        continue;
-      case '/':
-        (*output)[i] = '_';
-        continue;
-      default:
-        continue;
-    }
-  }
-}
-
-//------------------------------------------------------------------------------
-// Security functions restricting loads and use of dictionaries.
-
-// static
-bool SdchManager::Dictionary::CanSet(const std::string& domain,
-                                     const std::string& path,
-                                     const std::set<int> ports,
-                                     const GURL& dictionary_url) {
-  if (!SdchManager::Global()->IsInSupportedDomain(dictionary_url))
-    return false;
-  /*
-  A dictionary is invalid and must not be stored if any of the following are
-  true:
-    1. The dictionary has no Domain attribute.
-    2. The effective host name that derives from the referer URL host name does
-      not domain-match the Domain attribute.
-    3. The Domain attribute is a top level domain.
-    4. The referer URL host is a host domain name (not IP address) and has the
-      form HD, where D is the value of the Domain attribute, and H is a string
-      that contains one or more dots.
-    5. If the dictionary has a Port attribute and the referer URL's port was not
-      in the list.
-  */
-
-  // TODO(jar): Redirects in dictionary fetches might plausibly be problematic,
-  // and hence the conservative approach is to not allow any redirects (if there
-  // were any... then don't allow the dictionary to be set).
-
-  if (domain.empty()) {
-    SdchErrorRecovery(DICTIONARY_MISSING_DOMAIN_SPECIFIER);
-    return false;  // Domain is required.
-  }
-  if (net::RegistryControlledDomainService::GetDomainAndRegistry(domain).size()
-      == 0) {
-    SdchErrorRecovery(DICTIONARY_SPECIFIES_TOP_LEVEL_DOMAIN);
-    return false;  // domain was a TLD.
-  }
-  if (!Dictionary::DomainMatch(dictionary_url, domain)) {
-    SdchErrorRecovery(DICTIONARY_DOMAIN_NOT_MATCHING_SOURCE_URL);
-    return false;
-  }
-
-  std::string referrer_url_host = dictionary_url.host();
-  size_t postfix_domain_index = referrer_url_host.rfind(domain);
-  // See if it is indeed a postfix, or just an internal string.
-  if (referrer_url_host.size() == postfix_domain_index + domain.size()) {
-    // It is a postfix... so check to see if there's a dot in the prefix.
-    size_t end_of_host_index = referrer_url_host.find_first_of('.');
-    if (referrer_url_host.npos != end_of_host_index  &&
-        end_of_host_index < postfix_domain_index) {
-      SdchErrorRecovery(DICTIONARY_REFERER_URL_HAS_DOT_IN_PREFIX);
-      return false;
-    }
-  }
-
-  if (!ports.empty()
-      && 0 == ports.count(dictionary_url.EffectiveIntPort())) {
-    SdchErrorRecovery(DICTIONARY_PORT_NOT_MATCHING_SOURCE_URL);
-    return false;
-  }
-  return true;
-}
-
-// static
-bool SdchManager::Dictionary::CanUse(const GURL& referring_url) {
-  if (!SdchManager::Global()->IsInSupportedDomain(referring_url))
-    return false;
-  /*
-    1. The request URL's host name domain-matches the Domain attribute of the
-      dictionary.
-    2. If the dictionary has a Port attribute, the request port is one of the
-      ports listed in the Port attribute.
-    3. The request URL path-matches the path attribute of the dictionary.
-    4. The request is not an HTTPS request.
-*/
-  if (!DomainMatch(referring_url, domain_)) {
-    SdchErrorRecovery(DICTIONARY_FOUND_HAS_WRONG_DOMAIN);
-    return false;
-  }
-  if (!ports_.empty()
-      && 0 == ports_.count(referring_url.EffectiveIntPort())) {
-    SdchErrorRecovery(DICTIONARY_FOUND_HAS_WRONG_PORT_LIST);
-    return false;
-  }
-  if (path_.size() && !PathMatch(referring_url.path(), path_)) {
-    SdchErrorRecovery(DICTIONARY_FOUND_HAS_WRONG_PATH);
-    return false;
-  }
-  if (referring_url.SchemeIsSecure()) {
-    SdchErrorRecovery(DICTIONARY_FOUND_HAS_WRONG_SCHEME);
-    return false;
-  }
-
-  // TODO(jar): Remove overly restrictive failsafe test (added per security
-  // review) when we have a need to be more general.
-  if (!referring_url.SchemeIs("http")) {
-    SdchErrorRecovery(ATTEMPT_TO_DECODE_NON_HTTP_DATA);
-    return false;
-  }
-
-  return true;
-}
-
-bool SdchManager::Dictionary::CanAdvertise(const GURL& target_url) {
-  if (!SdchManager::Global()->IsInSupportedDomain(target_url))
-    return false;
-  /* The specific rules of when a dictionary should be advertised in an
-     Avail-Dictionary header are modeled after the rules for cookie scoping. The
-     terms "domain-match" and "pathmatch" are defined in RFC 2965 [6]. A
-     dictionary may be advertised in the Avail-Dictionaries header exactly when
-     all of the following are true:
-      1. The server's effective host name domain-matches the Domain attribute of
-         the dictionary.
-      2. If the dictionary has a Port attribute, the request port is one of the
-         ports listed in the Port attribute.
-      3. The request URI path-matches the path header of the dictionary.
-      4. The request is not an HTTPS request.
-    */
-  if (!DomainMatch(target_url, domain_))
-    return false;
-  if (!ports_.empty() && 0 == ports_.count(target_url.EffectiveIntPort()))
-    return false;
-  if (path_.size() && !PathMatch(target_url.path(), path_))
-    return false;
-  if (target_url.SchemeIsSecure())
-    return false;
-  if (Time::Now() > expiration_)
-    return false;
-  return true;
-}
-
-bool SdchManager::Dictionary::PathMatch(const std::string& path,
-                                        const std::string& restriction) {
-  /*  Must be either:
-  1. P2 is equal to P1
-  2. P2 is a prefix of P1 and either the final character in P2 is "/" or the
-      character following P2 in P1 is "/".
-      */
-  if (path == restriction)
-    return true;
-  size_t prefix_length = restriction.size();
-  if (prefix_length > path.size())
-    return false;  // Can't be a prefix.
-  if (0 != path.compare(0, prefix_length, restriction))
-    return false;
-  return restriction[prefix_length - 1] == '/' || path[prefix_length] == '/';
-}
-
-// static
-bool SdchManager::Dictionary::DomainMatch(const GURL& gurl,
-                                          const std::string& restriction) {
-  // TODO(jar): This is not precisely a domain match definition.
-  return gurl.DomainIs(restriction.data(), restriction.size());
-}
-
 //------------------------------------------------------------------------------
 // Methods for supporting latency experiments.
 
@@ -542,3 +523,23 @@
   SdchErrorRecovery(LATENCY_TEST_DISALLOWED);
   allow_latency_experiment_.erase(it);
 }
+
+// static
+void SdchManager::UrlSafeBase64Encode(const std::string& input,
+                                      std::string* output) {
+  // Since this is only done during a dictionary load, and hashes are only 8
+  // characters, we just do the simple fixup, rather than rewriting the encoder.
+  base::Base64Encode(input, output);
+  for (size_t i = 0; i < output->size(); ++i) {
+    switch (output->data()[i]) {
+      case '+':
+        (*output)[i] = '-';
+        continue;
+      case '/':
+        (*output)[i] = '_';
+        continue;
+      default:
+        continue;
+    }
+  }
+}
diff --git a/net/base/ssl_config_service.h b/net/base/ssl_config_service.h
index de2ebef..0a9d569e 100644
--- a/net/base/ssl_config_service.h
+++ b/net/base/ssl_config_service.h
@@ -22,6 +22,9 @@
   SSLConfig();
   ~SSLConfig();
 
+  // Returns true if |cert| is one of the certs in |allowed_bad_certs|.
+  bool IsAllowedBadCert(X509Certificate* cert) const;
+
   bool rev_checking_enabled;  // True if server certificate revocation
                               // checking is enabled.
   // SSL 2.0 is not supported.
@@ -79,9 +82,6 @@
     int cert_status;
   };
 
-  // Returns true if |cert| is one of the certs in |allowed_bad_certs|.
-  bool IsAllowedBadCert(X509Certificate* cert) const;
-
   // Add any known-bad SSL certificate (with its cert status) to
   // |allowed_bad_certs| that should not trigger an ERR_CERT_* error when
   // calling SSLClientSocket::Connect.  This would normally be done in
diff --git a/net/base/transport_security_state.h b/net/base/transport_security_state.h
index fcd4e79..768ccbb 100644
--- a/net/base/transport_security_state.h
+++ b/net/base/transport_security_state.h
@@ -43,13 +43,13 @@
       //   * Certificate issues are fatal.
       MODE_SPDY_ONLY = 2,
     };
-    Mode mode;
 
     DomainState()
         : mode(MODE_STRICT),
           created(base::Time::Now()),
           include_subdomains(false) { }
 
+    Mode mode;
     base::Time created;  // when this host entry was first created
     base::Time expiry;  // the absolute time (UTC) when this record expires
     bool include_subdomains;  // subdomains included?
@@ -101,6 +101,10 @@
   // our state is dirty.
   void DirtyNotify();
 
+  static std::string CanonicaliseHost(const std::string& host);
+  static bool IsPreloadedSTS(const std::string& canonicalised_host,
+                             bool* out_include_subdomains);
+
   // The set of hosts that have enabled TransportSecurity. The keys here
   // are SHA256(DNSForm(domain)) where DNSForm converts from dotted form
   // ('www.google.com') to the form used in DNS: "\x03www\x06google\x03com"
@@ -109,10 +113,6 @@
   // Our delegate who gets notified when we are dirtied, or NULL.
   Delegate* delegate_;
 
-  static std::string CanonicaliseHost(const std::string& host);
-  static bool IsPreloadedSTS(const std::string& canonicalised_host,
-                             bool* out_include_subdomains);
-
   DISALLOW_COPY_AND_ASSIGN(TransportSecurityState);
 };
 
diff --git a/net/base/x509_certificate.h b/net/base/x509_certificate.h
index c59c33c..47178f5 100644
--- a/net/base/x509_certificate.h
+++ b/net/base/x509_certificate.h
@@ -109,6 +109,11 @@
                   FORMAT_PKCS7,
   };
 
+  // Creates a X509Certificate from the ground up.  Used by tests that simulate
+  // SSL connections.
+  X509Certificate(const std::string& subject, const std::string& issuer,
+                  base::Time start_date, base::Time expiration_date);
+
   // Create an X509Certificate from a handle to the certificate object in the
   // underlying crypto library. |source| specifies where |cert_handle| comes
   // from.  Given two certificate handles for the same certificate, our
@@ -175,11 +180,6 @@
                                            uint32 serial_number,
                                            base::TimeDelta valid_duration);
 
-  // Creates a X509Certificate from the ground up.  Used by tests that simulate
-  // SSL connections.
-  X509Certificate(const std::string& subject, const std::string& issuer,
-                  base::Time start_date, base::Time expiration_date);
-
   // Appends a representation of this object to the given pickle.
   void Persist(Pickle* pickle);
 
diff --git a/net/disk_cache/backend_impl.cc b/net/disk_cache/backend_impl.cc
index 223c970..31320a78 100644
--- a/net/disk_cache/backend_impl.cc
+++ b/net/disk_cache/backend_impl.cc
@@ -435,102 +435,6 @@
 
 // ------------------------------------------------------------------------
 
-int32 BackendImpl::GetEntryCount() const {
-  if (!index_ || disabled_)
-    return 0;
-  // num_entries includes entries already evicted.
-  int32 not_deleted = data_->header.num_entries -
-                      data_->header.lru.sizes[Rankings::DELETED];
-
-  if (not_deleted < 0) {
-    NOTREACHED();
-    not_deleted = 0;
-  }
-
-  return not_deleted;
-}
-
-int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
-                           CompletionCallback* callback) {
-  DCHECK(callback);
-  background_queue_.OpenEntry(key, entry, callback);
-  return net::ERR_IO_PENDING;
-}
-
-int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
-                             CompletionCallback* callback) {
-  DCHECK(callback);
-  background_queue_.CreateEntry(key, entry, callback);
-  return net::ERR_IO_PENDING;
-}
-
-int BackendImpl::DoomEntry(const std::string& key,
-                           CompletionCallback* callback) {
-  DCHECK(callback);
-  background_queue_.DoomEntry(key, callback);
-  return net::ERR_IO_PENDING;
-}
-
-int BackendImpl::DoomAllEntries(CompletionCallback* callback) {
-  DCHECK(callback);
-  background_queue_.DoomAllEntries(callback);
-  return net::ERR_IO_PENDING;
-}
-
-int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
-                                    const base::Time end_time,
-                                    CompletionCallback* callback) {
-  DCHECK(callback);
-  background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
-  return net::ERR_IO_PENDING;
-}
-
-int BackendImpl::DoomEntriesSince(const base::Time initial_time,
-                                  CompletionCallback* callback) {
-  DCHECK(callback);
-  background_queue_.DoomEntriesSince(initial_time, callback);
-  return net::ERR_IO_PENDING;
-}
-
-int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
-                               CompletionCallback* callback) {
-  DCHECK(callback);
-  background_queue_.OpenNextEntry(iter, next_entry, callback);
-  return net::ERR_IO_PENDING;
-}
-
-void BackendImpl::EndEnumeration(void** iter) {
-  background_queue_.EndEnumeration(*iter);
-  *iter = NULL;
-}
-
-void BackendImpl::GetStats(StatsItems* stats) {
-  if (disabled_)
-    return;
-
-  std::pair<std::string, std::string> item;
-
-  item.first = "Entries";
-  item.second = base::StringPrintf("%d", data_->header.num_entries);
-  stats->push_back(item);
-
-  item.first = "Pending IO";
-  item.second = base::StringPrintf("%d", num_pending_io_);
-  stats->push_back(item);
-
-  item.first = "Max size";
-  item.second = base::StringPrintf("%d", max_size_);
-  stats->push_back(item);
-
-  item.first = "Current size";
-  item.second = base::StringPrintf("%d", data_->header.num_bytes);
-  stats->push_back(item);
-
-  stats_.GetItems(stats);
-}
-
-// ------------------------------------------------------------------------
-
 int BackendImpl::SyncInit() {
   DCHECK(!init_);
   if (init_)
@@ -1355,6 +1259,102 @@
 
 // ------------------------------------------------------------------------
 
+int32 BackendImpl::GetEntryCount() const {
+  if (!index_ || disabled_)
+    return 0;
+  // num_entries includes entries already evicted.
+  int32 not_deleted = data_->header.num_entries -
+                      data_->header.lru.sizes[Rankings::DELETED];
+
+  if (not_deleted < 0) {
+    NOTREACHED();
+    not_deleted = 0;
+  }
+
+  return not_deleted;
+}
+
+int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
+                           CompletionCallback* callback) {
+  DCHECK(callback);
+  background_queue_.OpenEntry(key, entry, callback);
+  return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
+                             CompletionCallback* callback) {
+  DCHECK(callback);
+  background_queue_.CreateEntry(key, entry, callback);
+  return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::DoomEntry(const std::string& key,
+                           CompletionCallback* callback) {
+  DCHECK(callback);
+  background_queue_.DoomEntry(key, callback);
+  return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::DoomAllEntries(CompletionCallback* callback) {
+  DCHECK(callback);
+  background_queue_.DoomAllEntries(callback);
+  return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
+                                    const base::Time end_time,
+                                    CompletionCallback* callback) {
+  DCHECK(callback);
+  background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
+  return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::DoomEntriesSince(const base::Time initial_time,
+                                  CompletionCallback* callback) {
+  DCHECK(callback);
+  background_queue_.DoomEntriesSince(initial_time, callback);
+  return net::ERR_IO_PENDING;
+}
+
+int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
+                               CompletionCallback* callback) {
+  DCHECK(callback);
+  background_queue_.OpenNextEntry(iter, next_entry, callback);
+  return net::ERR_IO_PENDING;
+}
+
+void BackendImpl::EndEnumeration(void** iter) {
+  background_queue_.EndEnumeration(*iter);
+  *iter = NULL;
+}
+
+void BackendImpl::GetStats(StatsItems* stats) {
+  if (disabled_)
+    return;
+
+  std::pair<std::string, std::string> item;
+
+  item.first = "Entries";
+  item.second = base::StringPrintf("%d", data_->header.num_entries);
+  stats->push_back(item);
+
+  item.first = "Pending IO";
+  item.second = base::StringPrintf("%d", num_pending_io_);
+  stats->push_back(item);
+
+  item.first = "Max size";
+  item.second = base::StringPrintf("%d", max_size_);
+  stats->push_back(item);
+
+  item.first = "Current size";
+  item.second = base::StringPrintf("%d", data_->header.num_bytes);
+  stats->push_back(item);
+
+  stats_.GetItems(stats);
+}
+
+// ------------------------------------------------------------------------
+
 // We just created a new file so we're going to write the header and set the
 // file length to include the hash table (zero filled).
 bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
diff --git a/net/disk_cache/backend_impl.h b/net/disk_cache/backend_impl.h
index b4815255..74a1eafd 100644
--- a/net/disk_cache/backend_impl.h
+++ b/net/disk_cache/backend_impl.h
@@ -60,24 +60,6 @@
   // Performs general initialization for this current instance of the cache.
   int Init(CompletionCallback* callback);
 
-  // Backend interface.
-  virtual int32 GetEntryCount() const;
-  virtual int OpenEntry(const std::string& key, Entry** entry,
-                        CompletionCallback* callback);
-  virtual int CreateEntry(const std::string& key, Entry** entry,
-                          CompletionCallback* callback);
-  virtual int DoomEntry(const std::string& key, CompletionCallback* callback);
-  virtual int DoomAllEntries(CompletionCallback* callback);
-  virtual int DoomEntriesBetween(const base::Time initial_time,
-                                 const base::Time end_time,
-                                 CompletionCallback* callback);
-  virtual int DoomEntriesSince(const base::Time initial_time,
-                               CompletionCallback* callback);
-  virtual int OpenNextEntry(void** iter, Entry** next_entry,
-                            CompletionCallback* callback);
-  virtual void EndEnumeration(void** iter);
-  virtual void GetStats(StatsItems* stats);
-
   // Performs the actual initialization and final cleanup on destruction.
   int SyncInit();
   void CleanupCache();
@@ -262,6 +244,24 @@
   // or an error code (negative value).
   int SelfCheck();
 
+  // Backend interface.
+  virtual int32 GetEntryCount() const;
+  virtual int OpenEntry(const std::string& key, Entry** entry,
+                        CompletionCallback* callback);
+  virtual int CreateEntry(const std::string& key, Entry** entry,
+                          CompletionCallback* callback);
+  virtual int DoomEntry(const std::string& key, CompletionCallback* callback);
+  virtual int DoomAllEntries(CompletionCallback* callback);
+  virtual int DoomEntriesBetween(const base::Time initial_time,
+                                 const base::Time end_time,
+                                 CompletionCallback* callback);
+  virtual int DoomEntriesSince(const base::Time initial_time,
+                               CompletionCallback* callback);
+  virtual int OpenNextEntry(void** iter, Entry** next_entry,
+                            CompletionCallback* callback);
+  virtual void EndEnumeration(void** iter);
+  virtual void GetStats(StatsItems* stats);
+
  private:
   typedef base::hash_map<CacheAddr, EntryImpl*> EntriesMap;
 
diff --git a/net/disk_cache/entry_impl.cc b/net/disk_cache/entry_impl.cc
index 3cb895f..ff05e1158 100644
--- a/net/disk_cache/entry_impl.cc
+++ b/net/disk_cache/entry_impl.cc
@@ -375,197 +375,6 @@
   }
 }
 
-// When an entry is deleted from the cache, we clean up all the data associated
-// with it for two reasons: to simplify the reuse of the block (we know that any
-// unused block is filled with zeros), and to simplify the handling of write /
-// read partial information from an entry (don't have to worry about returning
-// data related to a previous cache entry because the range was not fully
-// written before).
-EntryImpl::~EntryImpl() {
-  Log("~EntryImpl in");
-
-  // Save the sparse info to disk. This will generate IO for this entry and
-  // maybe for a child entry, so it is important to do it before deleting this
-  // entry.
-  sparse_.reset();
-
-  // Remove this entry from the list of open entries.
-  backend_->OnEntryDestroyBegin(entry_.address());
-
-  if (doomed_) {
-    DeleteEntryData(true);
-  } else {
-    net_log_.AddEvent(net::NetLog::TYPE_DISK_CACHE_CLOSE, NULL);
-    bool ret = true;
-    for (int index = 0; index < kNumStreams; index++) {
-      if (user_buffers_[index].get()) {
-        if (!(ret = Flush(index, 0)))
-          LOG(ERROR) << "Failed to save user data";
-      }
-      if (unreported_size_[index]) {
-        backend_->ModifyStorageSize(
-            entry_.Data()->data_size[index] - unreported_size_[index],
-            entry_.Data()->data_size[index]);
-      }
-    }
-
-    if (!ret) {
-      // There was a failure writing the actual data. Mark the entry as dirty.
-      int current_id = backend_->GetCurrentEntryId();
-      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
-      node_.Store();
-    } else if (node_.HasData() && node_.Data()->dirty) {
-      node_.Data()->dirty = 0;
-      node_.Store();
-    }
-  }
-
-  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
-  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY, NULL);
-  backend_->OnEntryDestroyEnd();
-}
-
-void EntryImpl::Doom() {
-  backend_->background_queue()->DoomEntryImpl(this);
-}
-
-void EntryImpl::Close() {
-  backend_->background_queue()->CloseEntryImpl(this);
-}
-
-std::string EntryImpl::GetKey() const {
-  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
-  if (entry->Data()->key_len <= kMaxInternalKeyLength)
-    return std::string(entry->Data()->key);
-
-  // We keep a copy of the key so that we can always return it, even if the
-  // backend is disabled.
-  if (!key_.empty())
-    return key_;
-
-  Addr address(entry->Data()->long_key);
-  DCHECK(address.is_initialized());
-  size_t offset = 0;
-  if (address.is_block_file())
-    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
-
-  COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
-  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
-                                                                kKeyFileIndex);
-
-  if (!key_file ||
-      !key_file->Read(WriteInto(&key_, entry->Data()->key_len + 1),
-                      entry->Data()->key_len + 1, offset))
-    key_.clear();
-  return key_;
-}
-
-Time EntryImpl::GetLastUsed() const {
-  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
-  return Time::FromInternalValue(node->Data()->last_used);
-}
-
-Time EntryImpl::GetLastModified() const {
-  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
-  return Time::FromInternalValue(node->Data()->last_modified);
-}
-
-int32 EntryImpl::GetDataSize(int index) const {
-  if (index < 0 || index >= kNumStreams)
-    return 0;
-
-  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
-  return entry->Data()->data_size[index];
-}
-
-int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
-                        net::CompletionCallback* callback) {
-  if (!callback)
-    return ReadDataImpl(index, offset, buf, buf_len, callback);
-
-  DCHECK(node_.Data()->dirty || read_only_);
-  if (index < 0 || index >= kNumStreams)
-    return net::ERR_INVALID_ARGUMENT;
-
-  int entry_size = entry_.Data()->data_size[index];
-  if (offset >= entry_size || offset < 0 || !buf_len)
-    return 0;
-
-  if (buf_len < 0)
-    return net::ERR_INVALID_ARGUMENT;
-
-  backend_->background_queue()->ReadData(this, index, offset, buf, buf_len,
-                                         callback);
-  return net::ERR_IO_PENDING;
-}
-
-int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
-                         CompletionCallback* callback, bool truncate) {
-  if (!callback)
-    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);
-
-  DCHECK(node_.Data()->dirty || read_only_);
-  if (index < 0 || index >= kNumStreams)
-    return net::ERR_INVALID_ARGUMENT;
-
-  if (offset < 0 || buf_len < 0)
-    return net::ERR_INVALID_ARGUMENT;
-
-  backend_->background_queue()->WriteData(this, index, offset, buf, buf_len,
-                                          truncate, callback);
-  return net::ERR_IO_PENDING;
-}
-
-int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
-                              net::CompletionCallback* callback) {
-  if (!callback)
-    return ReadSparseDataImpl(offset, buf, buf_len, callback);
-
-  backend_->background_queue()->ReadSparseData(this, offset, buf, buf_len,
-                                               callback);
-  return net::ERR_IO_PENDING;
-}
-
-int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
-                               net::CompletionCallback* callback) {
-  if (!callback)
-    return WriteSparseDataImpl(offset, buf, buf_len, callback);
-
-  backend_->background_queue()->WriteSparseData(this, offset, buf, buf_len,
-                                                callback);
-  return net::ERR_IO_PENDING;
-}
-
-int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
-                                 CompletionCallback* callback) {
-  backend_->background_queue()->GetAvailableRange(this, offset, len, start,
-                                                  callback);
-  return net::ERR_IO_PENDING;
-}
-
-bool EntryImpl::CouldBeSparse() const {
-  if (sparse_.get())
-    return true;
-
-  scoped_ptr<SparseControl> sparse;
-  sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
-  return sparse->CouldBeSparse();
-}
-
-void EntryImpl::CancelSparseIO() {
-  backend_->background_queue()->CancelSparseIO(this);
-}
-
-int EntryImpl::ReadyForSparseIO(net::CompletionCallback* callback) {
-  if (!sparse_.get())
-    return net::OK;
-
-  backend_->background_queue()->ReadyForSparseIO(this, callback);
-  return net::ERR_IO_PENDING;
-}
-
-// ------------------------------------------------------------------------
-
 void EntryImpl::DoomImpl() {
   if (doomed_)
     return;
@@ -614,171 +423,6 @@
   return result;
 }
 
-int EntryImpl::InternalReadData(int index, int offset, net::IOBuffer* buf,
-                                int buf_len, CompletionCallback* callback) {
-  DCHECK(node_.Data()->dirty || read_only_);
-  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
-  if (index < 0 || index >= kNumStreams)
-    return net::ERR_INVALID_ARGUMENT;
-
-  int entry_size = entry_.Data()->data_size[index];
-  if (offset >= entry_size || offset < 0 || !buf_len)
-    return 0;
-
-  if (buf_len < 0)
-    return net::ERR_INVALID_ARGUMENT;
-
-  TimeTicks start = TimeTicks::Now();
-
-  if (offset + buf_len > entry_size)
-    buf_len = entry_size - offset;
-
-  UpdateRank(false);
-
-  backend_->OnEvent(Stats::READ_DATA);
-  backend_->OnRead(buf_len);
-
-  Addr address(entry_.Data()->data_addr[index]);
-  int eof = address.is_initialized() ? entry_size : 0;
-  if (user_buffers_[index].get() &&
-      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
-    // Complete the operation locally.
-    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
-    ReportIOTime(kRead, start);
-    return buf_len;
-  }
-
-  address.set_value(entry_.Data()->data_addr[index]);
-  DCHECK(address.is_initialized());
-  if (!address.is_initialized())
-    return net::ERR_FAILED;
-
-  File* file = GetBackingFile(address, index);
-  if (!file)
-    return net::ERR_FAILED;
-
-  size_t file_offset = offset;
-  if (address.is_block_file()) {
-    DCHECK_LE(offset + buf_len, kMaxBlockSize);
-    file_offset += address.start_block() * address.BlockSize() +
-                   kBlockHeaderSize;
-  }
-
-  SyncCallback* io_callback = NULL;
-  if (callback) {
-    io_callback = new SyncCallback(this, buf, callback,
-                                   net::NetLog::TYPE_DISK_CACHE_READ_DATA);
-  }
-
-  bool completed;
-  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
-    if (io_callback)
-      io_callback->Discard();
-    return net::ERR_FAILED;
-  }
-
-  if (io_callback && completed)
-    io_callback->Discard();
-
-  ReportIOTime(kRead, start);
-  return (completed || !callback) ? buf_len : net::ERR_IO_PENDING;
-}
-
-int EntryImpl::InternalWriteData(int index, int offset, net::IOBuffer* buf,
-                                 int buf_len, CompletionCallback* callback,
-                                 bool truncate) {
-  DCHECK(node_.Data()->dirty || read_only_);
-  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
-  if (index < 0 || index >= kNumStreams)
-    return net::ERR_INVALID_ARGUMENT;
-
-  if (offset < 0 || buf_len < 0)
-    return net::ERR_INVALID_ARGUMENT;
-
-  int max_file_size = backend_->MaxFileSize();
-
-  // offset or buf_len could be negative numbers.
-  if (offset > max_file_size || buf_len > max_file_size ||
-      offset + buf_len > max_file_size) {
-    int size = offset + buf_len;
-    if (size <= max_file_size)
-      size = kint32max;
-    backend_->TooMuchStorageRequested(size);
-    return net::ERR_FAILED;
-  }
-
-  TimeTicks start = TimeTicks::Now();
-
-  // Read the size at this point (it may change inside prepare).
-  int entry_size = entry_.Data()->data_size[index];
-  bool extending = entry_size < offset + buf_len;
-  truncate = truncate && entry_size > offset + buf_len;
-  Trace("To PrepareTarget 0x%x", entry_.address().value());
-  if (!PrepareTarget(index, offset, buf_len, truncate))
-    return net::ERR_FAILED;
-
-  Trace("From PrepareTarget 0x%x", entry_.address().value());
-  if (extending || truncate)
-    UpdateSize(index, entry_size, offset + buf_len);
-
-  UpdateRank(true);
-
-  backend_->OnEvent(Stats::WRITE_DATA);
-  backend_->OnWrite(buf_len);
-
-  if (user_buffers_[index].get()) {
-    // Complete the operation locally.
-    user_buffers_[index]->Write(offset, buf, buf_len);
-    ReportIOTime(kWrite, start);
-    return buf_len;
-  }
-
-  Addr address(entry_.Data()->data_addr[index]);
-  if (offset + buf_len == 0) {
-    if (truncate) {
-      DCHECK(!address.is_initialized());
-    }
-    return 0;
-  }
-
-  File* file = GetBackingFile(address, index);
-  if (!file)
-    return net::ERR_FAILED;
-
-  size_t file_offset = offset;
-  if (address.is_block_file()) {
-    DCHECK_LE(offset + buf_len, kMaxBlockSize);
-    file_offset += address.start_block() * address.BlockSize() +
-                   kBlockHeaderSize;
-  } else if (truncate || (extending && !buf_len)) {
-    if (!file->SetLength(offset + buf_len))
-      return net::ERR_FAILED;
-  }
-
-  if (!buf_len)
-    return 0;
-
-  SyncCallback* io_callback = NULL;
-  if (callback) {
-    io_callback = new SyncCallback(this, buf, callback,
-                                   net::NetLog::TYPE_DISK_CACHE_WRITE_DATA);
-  }
-
-  bool completed;
-  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
-                   &completed)) {
-    if (io_callback)
-      io_callback->Discard();
-    return net::ERR_FAILED;
-  }
-
-  if (io_callback && completed)
-    io_callback->Discard();
-
-  ReportIOTime(kWrite, start);
-  return (completed || !callback) ? buf_len : net::ERR_IO_PENDING;
-}
-
 int EntryImpl::ReadSparseDataImpl(int64 offset, net::IOBuffer* buf, int buf_len,
                                   CompletionCallback* callback) {
   DCHECK(node_.Data()->dirty || read_only_);
@@ -827,8 +471,6 @@
   return sparse_->ReadyToUse(callback);
 }
 
-// ------------------------------------------------------------------------
-
 uint32 EntryImpl::GetHash() {
   return entry_.Data()->hash;
 }
@@ -1061,6 +703,362 @@
   return net_log_;
 }
 
+void EntryImpl::Doom() {
+  backend_->background_queue()->DoomEntryImpl(this);
+}
+
+void EntryImpl::Close() {
+  backend_->background_queue()->CloseEntryImpl(this);
+}
+
+std::string EntryImpl::GetKey() const {
+  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
+  if (entry->Data()->key_len <= kMaxInternalKeyLength)
+    return std::string(entry->Data()->key);
+
+  // We keep a copy of the key so that we can always return it, even if the
+  // backend is disabled.
+  if (!key_.empty())
+    return key_;
+
+  Addr address(entry->Data()->long_key);
+  DCHECK(address.is_initialized());
+  size_t offset = 0;
+  if (address.is_block_file())
+    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
+
+  COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
+  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
+                                                                kKeyFileIndex);
+
+  if (!key_file ||
+      !key_file->Read(WriteInto(&key_, entry->Data()->key_len + 1),
+                      entry->Data()->key_len + 1, offset))
+    key_.clear();
+  return key_;
+}
+
+Time EntryImpl::GetLastUsed() const {
+  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
+  return Time::FromInternalValue(node->Data()->last_used);
+}
+
+Time EntryImpl::GetLastModified() const {
+  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
+  return Time::FromInternalValue(node->Data()->last_modified);
+}
+
+int32 EntryImpl::GetDataSize(int index) const {
+  if (index < 0 || index >= kNumStreams)
+    return 0;
+
+  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
+  return entry->Data()->data_size[index];
+}
+
+int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
+                        net::CompletionCallback* callback) {
+  if (!callback)
+    return ReadDataImpl(index, offset, buf, buf_len, callback);
+
+  DCHECK(node_.Data()->dirty || read_only_);
+  if (index < 0 || index >= kNumStreams)
+    return net::ERR_INVALID_ARGUMENT;
+
+  int entry_size = entry_.Data()->data_size[index];
+  if (offset >= entry_size || offset < 0 || !buf_len)
+    return 0;
+
+  if (buf_len < 0)
+    return net::ERR_INVALID_ARGUMENT;
+
+  backend_->background_queue()->ReadData(this, index, offset, buf, buf_len,
+                                         callback);
+  return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
+                         CompletionCallback* callback, bool truncate) {
+  if (!callback)
+    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);
+
+  DCHECK(node_.Data()->dirty || read_only_);
+  if (index < 0 || index >= kNumStreams)
+    return net::ERR_INVALID_ARGUMENT;
+
+  if (offset < 0 || buf_len < 0)
+    return net::ERR_INVALID_ARGUMENT;
+
+  backend_->background_queue()->WriteData(this, index, offset, buf, buf_len,
+                                          truncate, callback);
+  return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+                              net::CompletionCallback* callback) {
+  if (!callback)
+    return ReadSparseDataImpl(offset, buf, buf_len, callback);
+
+  backend_->background_queue()->ReadSparseData(this, offset, buf, buf_len,
+                                               callback);
+  return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+                               net::CompletionCallback* callback) {
+  if (!callback)
+    return WriteSparseDataImpl(offset, buf, buf_len, callback);
+
+  backend_->background_queue()->WriteSparseData(this, offset, buf, buf_len,
+                                                callback);
+  return net::ERR_IO_PENDING;
+}
+
+int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
+                                 CompletionCallback* callback) {
+  backend_->background_queue()->GetAvailableRange(this, offset, len, start,
+                                                  callback);
+  return net::ERR_IO_PENDING;
+}
+
+bool EntryImpl::CouldBeSparse() const {
+  if (sparse_.get())
+    return true;
+
+  scoped_ptr<SparseControl> sparse;
+  sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
+  return sparse->CouldBeSparse();
+}
+
+void EntryImpl::CancelSparseIO() {
+  backend_->background_queue()->CancelSparseIO(this);
+}
+
+int EntryImpl::ReadyForSparseIO(net::CompletionCallback* callback) {
+  if (!sparse_.get())
+    return net::OK;
+
+  backend_->background_queue()->ReadyForSparseIO(this, callback);
+  return net::ERR_IO_PENDING;
+}
+
+// When an entry is deleted from the cache, we clean up all the data associated
+// with it for two reasons: to simplify the reuse of the block (we know that any
+// unused block is filled with zeros), and to simplify the handling of write /
+// read partial information from an entry (don't have to worry about returning
+// data related to a previous cache entry because the range was not fully
+// written before).
+EntryImpl::~EntryImpl() {
+  Log("~EntryImpl in");
+
+  // Save the sparse info to disk. This will generate IO for this entry and
+  // maybe for a child entry, so it is important to do it before deleting this
+  // entry.
+  sparse_.reset();
+
+  // Remove this entry from the list of open entries.
+  backend_->OnEntryDestroyBegin(entry_.address());
+
+  if (doomed_) {
+    DeleteEntryData(true);
+  } else {
+    net_log_.AddEvent(net::NetLog::TYPE_DISK_CACHE_CLOSE, NULL);
+    bool ret = true;
+    for (int index = 0; index < kNumStreams; index++) {
+      if (user_buffers_[index].get()) {
+        if (!(ret = Flush(index, 0)))
+          LOG(ERROR) << "Failed to save user data";
+      }
+      if (unreported_size_[index]) {
+        backend_->ModifyStorageSize(
+            entry_.Data()->data_size[index] - unreported_size_[index],
+            entry_.Data()->data_size[index]);
+      }
+    }
+
+    if (!ret) {
+      // There was a failure writing the actual data. Mark the entry as dirty.
+      int current_id = backend_->GetCurrentEntryId();
+      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
+      node_.Store();
+    } else if (node_.HasData() && node_.Data()->dirty) {
+      node_.Data()->dirty = 0;
+      node_.Store();
+    }
+  }
+
+  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
+  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY, NULL);
+  backend_->OnEntryDestroyEnd();
+}
+
+// ------------------------------------------------------------------------
+
+int EntryImpl::InternalReadData(int index, int offset, net::IOBuffer* buf,
+                                int buf_len, CompletionCallback* callback) {
+  DCHECK(node_.Data()->dirty || read_only_);
+  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
+  if (index < 0 || index >= kNumStreams)
+    return net::ERR_INVALID_ARGUMENT;
+
+  int entry_size = entry_.Data()->data_size[index];
+  if (offset >= entry_size || offset < 0 || !buf_len)
+    return 0;
+
+  if (buf_len < 0)
+    return net::ERR_INVALID_ARGUMENT;
+
+  TimeTicks start = TimeTicks::Now();
+
+  if (offset + buf_len > entry_size)
+    buf_len = entry_size - offset;
+
+  UpdateRank(false);
+
+  backend_->OnEvent(Stats::READ_DATA);
+  backend_->OnRead(buf_len);
+
+  Addr address(entry_.Data()->data_addr[index]);
+  int eof = address.is_initialized() ? entry_size : 0;
+  if (user_buffers_[index].get() &&
+      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
+    // Complete the operation locally.
+    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
+    ReportIOTime(kRead, start);
+    return buf_len;
+  }
+
+  address.set_value(entry_.Data()->data_addr[index]);
+  DCHECK(address.is_initialized());
+  if (!address.is_initialized())
+    return net::ERR_FAILED;
+
+  File* file = GetBackingFile(address, index);
+  if (!file)
+    return net::ERR_FAILED;
+
+  size_t file_offset = offset;
+  if (address.is_block_file()) {
+    DCHECK_LE(offset + buf_len, kMaxBlockSize);
+    file_offset += address.start_block() * address.BlockSize() +
+                   kBlockHeaderSize;
+  }
+
+  SyncCallback* io_callback = NULL;
+  if (callback) {
+    io_callback = new SyncCallback(this, buf, callback,
+                                   net::NetLog::TYPE_DISK_CACHE_READ_DATA);
+  }
+
+  bool completed;
+  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
+    if (io_callback)
+      io_callback->Discard();
+    return net::ERR_FAILED;
+  }
+
+  if (io_callback && completed)
+    io_callback->Discard();
+
+  ReportIOTime(kRead, start);
+  return (completed || !callback) ? buf_len : net::ERR_IO_PENDING;
+}
+
+int EntryImpl::InternalWriteData(int index, int offset, net::IOBuffer* buf,
+                                 int buf_len, CompletionCallback* callback,
+                                 bool truncate) {
+  DCHECK(node_.Data()->dirty || read_only_);
+  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
+  if (index < 0 || index >= kNumStreams)
+    return net::ERR_INVALID_ARGUMENT;
+
+  if (offset < 0 || buf_len < 0)
+    return net::ERR_INVALID_ARGUMENT;
+
+  int max_file_size = backend_->MaxFileSize();
+
+  // offset or buf_len could be negative numbers.
+  if (offset > max_file_size || buf_len > max_file_size ||
+      offset + buf_len > max_file_size) {
+    int size = offset + buf_len;
+    if (size <= max_file_size)
+      size = kint32max;
+    backend_->TooMuchStorageRequested(size);
+    return net::ERR_FAILED;
+  }
+
+  TimeTicks start = TimeTicks::Now();
+
+  // Read the size at this point (it may change inside prepare).
+  int entry_size = entry_.Data()->data_size[index];
+  bool extending = entry_size < offset + buf_len;
+  truncate = truncate && entry_size > offset + buf_len;
+  Trace("To PrepareTarget 0x%x", entry_.address().value());
+  if (!PrepareTarget(index, offset, buf_len, truncate))
+    return net::ERR_FAILED;
+
+  Trace("From PrepareTarget 0x%x", entry_.address().value());
+  if (extending || truncate)
+    UpdateSize(index, entry_size, offset + buf_len);
+
+  UpdateRank(true);
+
+  backend_->OnEvent(Stats::WRITE_DATA);
+  backend_->OnWrite(buf_len);
+
+  if (user_buffers_[index].get()) {
+    // Complete the operation locally.
+    user_buffers_[index]->Write(offset, buf, buf_len);
+    ReportIOTime(kWrite, start);
+    return buf_len;
+  }
+
+  Addr address(entry_.Data()->data_addr[index]);
+  if (offset + buf_len == 0) {
+    if (truncate) {
+      DCHECK(!address.is_initialized());
+    }
+    return 0;
+  }
+
+  File* file = GetBackingFile(address, index);
+  if (!file)
+    return net::ERR_FAILED;
+
+  size_t file_offset = offset;
+  if (address.is_block_file()) {
+    DCHECK_LE(offset + buf_len, kMaxBlockSize);
+    file_offset += address.start_block() * address.BlockSize() +
+                   kBlockHeaderSize;
+  } else if (truncate || (extending && !buf_len)) {
+    if (!file->SetLength(offset + buf_len))
+      return net::ERR_FAILED;
+  }
+
+  if (!buf_len)
+    return 0;
+
+  SyncCallback* io_callback = NULL;
+  if (callback) {
+    io_callback = new SyncCallback(this, buf, callback,
+                                   net::NetLog::TYPE_DISK_CACHE_WRITE_DATA);
+  }
+
+  bool completed;
+  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
+                   &completed)) {
+    if (io_callback)
+      io_callback->Discard();
+    return net::ERR_FAILED;
+  }
+
+  if (io_callback && completed)
+    io_callback->Discard();
+
+  ReportIOTime(kWrite, start);
+  return (completed || !callback) ? buf_len : net::ERR_IO_PENDING;
+}
+
 // ------------------------------------------------------------------------
 
 bool EntryImpl::CreateDataBlock(int index, int size) {
diff --git a/net/disk_cache/entry_impl.h b/net/disk_cache/entry_impl.h
index d24e861..e56fc6b 100644
--- a/net/disk_cache/entry_impl.h
+++ b/net/disk_cache/entry_impl.h
@@ -33,28 +33,6 @@
 
   EntryImpl(BackendImpl* backend, Addr address, bool read_only);
 
-  // Entry interface.
-  virtual void Doom();
-  virtual void Close();
-  virtual std::string GetKey() const;
-  virtual base::Time GetLastUsed() const;
-  virtual base::Time GetLastModified() const;
-  virtual int32 GetDataSize(int index) const;
-  virtual int ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
-                       net::CompletionCallback* completion_callback);
-  virtual int WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
-                        net::CompletionCallback* completion_callback,
-                        bool truncate);
-  virtual int ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
-                             net::CompletionCallback* completion_callback);
-  virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
-                              net::CompletionCallback* completion_callback);
-  virtual int GetAvailableRange(int64 offset, int len, int64* start,
-                                CompletionCallback* callback);
-  virtual bool CouldBeSparse() const;
-  virtual void CancelSparseIO();
-  virtual int ReadyForSparseIO(net::CompletionCallback* completion_callback);
-
   // Background implementation of the Entry interface.
   void DoomImpl();
   int ReadDataImpl(int index, int offset, net::IOBuffer* buf, int buf_len,
@@ -138,6 +116,28 @@
 
   const net::BoundNetLog& net_log() const;
 
+  // Entry interface.
+  virtual void Doom();
+  virtual void Close();
+  virtual std::string GetKey() const;
+  virtual base::Time GetLastUsed() const;
+  virtual base::Time GetLastModified() const;
+  virtual int32 GetDataSize(int index) const;
+  virtual int ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
+                       net::CompletionCallback* completion_callback);
+  virtual int WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
+                        net::CompletionCallback* completion_callback,
+                        bool truncate);
+  virtual int ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+                             net::CompletionCallback* completion_callback);
+  virtual int WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
+                              net::CompletionCallback* completion_callback);
+  virtual int GetAvailableRange(int64 offset, int len, int64* start,
+                                CompletionCallback* callback);
+  virtual bool CouldBeSparse() const;
+  virtual void CancelSparseIO();
+  virtual int ReadyForSparseIO(net::CompletionCallback* completion_callback);
+
  private:
   enum {
      kNumStreams = 3
diff --git a/net/disk_cache/file.h b/net/disk_cache/file.h
index c266311..129c3dd 100644
--- a/net/disk_cache/file.h
+++ b/net/disk_cache/file.h
@@ -18,11 +18,11 @@
 // This interface is used to support asynchronous ReadData and WriteData calls.
 class FileIOCallback {
  public:
+  virtual ~FileIOCallback() {}
+
   // Notified of the actual number of bytes read or written. This value is
   // negative if an error occurred.
   virtual void OnFileIOComplete(int bytes_copied) = 0;
-
-  virtual ~FileIOCallback() {}
 };
 
 // Simple wrapper around a file that allows asynchronous operations.
diff --git a/net/disk_cache/mem_backend_impl.cc b/net/disk_cache/mem_backend_impl.cc
index 288a66e..1122bca 100644
--- a/net/disk_cache/mem_backend_impl.cc
+++ b/net/disk_cache/mem_backend_impl.cc
@@ -28,6 +28,17 @@
 
 namespace disk_cache {
 
+MemBackendImpl::MemBackendImpl() : max_size_(0), current_size_(0) {}
+
+MemBackendImpl::~MemBackendImpl() {
+  EntryMap::iterator it = entries_.begin();
+  while (it != entries_.end()) {
+    it->second->Doom();
+    it = entries_.begin();
+  }
+  DCHECK(!current_size_);
+}
+
 // Static.
 Backend* MemBackendImpl::CreateBackend(int max_bytes) {
   MemBackendImpl* cache = new MemBackendImpl();
@@ -62,17 +73,6 @@
   return true;
 }
 
-MemBackendImpl::MemBackendImpl() : max_size_(0), current_size_(0) {}
-
-MemBackendImpl::~MemBackendImpl() {
-  EntryMap::iterator it = entries_.begin();
-  while (it != entries_.end()) {
-    it->second->Doom();
-    it = entries_.begin();
-  }
-  DCHECK(!current_size_);
-}
-
 bool MemBackendImpl::SetMaxSize(int max_bytes) {
   COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
   if (max_bytes < 0)
@@ -86,10 +86,107 @@
   return true;
 }
 
+void MemBackendImpl::InternalDoomEntry(MemEntryImpl* entry) {
+  // Only parent entries can be passed into this method.
+  DCHECK(entry->type() == MemEntryImpl::kParentEntry);
+
+  rankings_.Remove(entry);
+  EntryMap::iterator it = entries_.find(entry->GetKey());
+  if (it != entries_.end())
+    entries_.erase(it);
+  else
+    NOTREACHED();
+
+  entry->InternalDoom();
+}
+
+void MemBackendImpl::UpdateRank(MemEntryImpl* node) {
+  rankings_.UpdateRank(node);
+}
+
+void MemBackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
+  if (old_size >= new_size)
+    SubstractStorageSize(old_size - new_size);
+  else
+    AddStorageSize(new_size - old_size);
+}
+
+int MemBackendImpl::MaxFileSize() const {
+  return max_size_ / 8;
+}
+
+void MemBackendImpl::InsertIntoRankingList(MemEntryImpl* entry) {
+  rankings_.Insert(entry);
+}
+
+void MemBackendImpl::RemoveFromRankingList(MemEntryImpl* entry) {
+  rankings_.Remove(entry);
+}
+
 int32 MemBackendImpl::GetEntryCount() const {
   return static_cast<int32>(entries_.size());
 }
 
+int MemBackendImpl::OpenEntry(const std::string& key, Entry** entry,
+                              CompletionCallback* callback) {
+  if (OpenEntry(key, entry))
+    return net::OK;
+
+  return net::ERR_FAILED;
+}
+
+int MemBackendImpl::CreateEntry(const std::string& key, Entry** entry,
+                                CompletionCallback* callback) {
+  if (CreateEntry(key, entry))
+    return net::OK;
+
+  return net::ERR_FAILED;
+}
+
+int MemBackendImpl::DoomEntry(const std::string& key,
+                              CompletionCallback* callback) {
+  if (DoomEntry(key))
+    return net::OK;
+
+  return net::ERR_FAILED;
+}
+
+int MemBackendImpl::DoomAllEntries(CompletionCallback* callback) {
+  if (DoomAllEntries())
+    return net::OK;
+
+  return net::ERR_FAILED;
+}
+
+int MemBackendImpl::DoomEntriesBetween(const base::Time initial_time,
+                                       const base::Time end_time,
+                                       CompletionCallback* callback) {
+  if (DoomEntriesBetween(initial_time, end_time))
+    return net::OK;
+
+  return net::ERR_FAILED;
+}
+
+int MemBackendImpl::DoomEntriesSince(const base::Time initial_time,
+                                     CompletionCallback* callback) {
+  if (DoomEntriesSince(initial_time))
+    return net::OK;
+
+  return net::ERR_FAILED;
+}
+
+int MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
+                                  CompletionCallback* callback) {
+  if (OpenNextEntry(iter, next_entry))
+    return net::OK;
+
+  return net::ERR_FAILED;
+}
+
+void MemBackendImpl::EndEnumeration(void** iter) {
+  *iter = NULL;
+}
+
 bool MemBackendImpl::OpenEntry(const std::string& key, Entry** entry) {
   EntryMap::iterator it = entries_.find(key);
   if (it == entries_.end())
@@ -101,14 +198,6 @@
   return true;
 }
 
-int MemBackendImpl::OpenEntry(const std::string& key, Entry** entry,
-                              CompletionCallback* callback) {
-  if (OpenEntry(key, entry))
-    return net::OK;
-
-  return net::ERR_FAILED;
-}
-
 bool MemBackendImpl::CreateEntry(const std::string& key, Entry** entry) {
   EntryMap::iterator it = entries_.find(key);
   if (it != entries_.end())
@@ -127,14 +216,6 @@
   return true;
 }
 
-int MemBackendImpl::CreateEntry(const std::string& key, Entry** entry,
-                                CompletionCallback* callback) {
-  if (CreateEntry(key, entry))
-    return net::OK;
-
-  return net::ERR_FAILED;
-}
-
 bool MemBackendImpl::DoomEntry(const std::string& key) {
   Entry* entry;
   if (!OpenEntry(key, &entry))
@@ -145,40 +226,11 @@
   return true;
 }
 
-int MemBackendImpl::DoomEntry(const std::string& key,
-                              CompletionCallback* callback) {
-  if (DoomEntry(key))
-    return net::OK;
-
-  return net::ERR_FAILED;
-}
-
-void MemBackendImpl::InternalDoomEntry(MemEntryImpl* entry) {
-  // Only parent entries can be passed into this method.
-  DCHECK(entry->type() == MemEntryImpl::kParentEntry);
-
-  rankings_.Remove(entry);
-  EntryMap::iterator it = entries_.find(entry->GetKey());
-  if (it != entries_.end())
-    entries_.erase(it);
-  else
-    NOTREACHED();
-
-  entry->InternalDoom();
-}
-
 bool MemBackendImpl::DoomAllEntries() {
   TrimCache(true);
   return true;
 }
 
-int MemBackendImpl::DoomAllEntries(CompletionCallback* callback) {
-  if (DoomAllEntries())
-    return net::OK;
-
-  return net::ERR_FAILED;
-}
-
 bool MemBackendImpl::DoomEntriesBetween(const Time initial_time,
                                         const Time end_time) {
   if (end_time.is_null())
@@ -205,15 +257,6 @@
   return true;
 }
 
-int MemBackendImpl::DoomEntriesBetween(const base::Time initial_time,
-                                       const base::Time end_time,
-                                       CompletionCallback* callback) {
-  if (DoomEntriesBetween(initial_time, end_time))
-    return net::OK;
-
-  return net::ERR_FAILED;
-}
-
 bool MemBackendImpl::DoomEntriesSince(const Time initial_time) {
   for (;;) {
     // Get the entry in the front.
@@ -226,14 +269,6 @@
   }
 }
 
-int MemBackendImpl::DoomEntriesSince(const base::Time initial_time,
-                                     CompletionCallback* callback) {
-  if (DoomEntriesSince(initial_time))
-    return net::OK;
-
-  return net::ERR_FAILED;
-}
-
 bool MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry) {
   MemEntryImpl* current = reinterpret_cast<MemEntryImpl*>(*iter);
   MemEntryImpl* node = rankings_.GetNext(current);
@@ -251,18 +286,6 @@
   return NULL != node;
 }
 
-int MemBackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
-                                  CompletionCallback* callback) {
-  if (OpenNextEntry(iter, next_entry))
-    return net::OK;
-
-  return net::ERR_FAILED;
-}
-
-void MemBackendImpl::EndEnumeration(void** iter) {
-  *iter = NULL;
-}
-
 void MemBackendImpl::TrimCache(bool empty) {
   MemEntryImpl* next = rankings_.GetPrev(NULL);
 
@@ -293,27 +316,4 @@
   DCHECK(current_size_ >= 0);
 }
 
-void MemBackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
-  if (old_size >= new_size)
-    SubstractStorageSize(old_size - new_size);
-  else
-    AddStorageSize(new_size - old_size);
-}
-
-void MemBackendImpl::UpdateRank(MemEntryImpl* node) {
-  rankings_.UpdateRank(node);
-}
-
-int MemBackendImpl::MaxFileSize() const {
-  return max_size_ / 8;
-}
-
-void MemBackendImpl::InsertIntoRankingList(MemEntryImpl* entry) {
-  rankings_.Insert(entry);
-}
-
-void MemBackendImpl::RemoveFromRankingList(MemEntryImpl* entry) {
-  rankings_.Remove(entry);
-}
-
 }  // namespace disk_cache
diff --git a/net/disk_cache/mem_backend_impl.h b/net/disk_cache/mem_backend_impl.h
index c78c6701..9b920ac5 100644
--- a/net/disk_cache/mem_backend_impl.h
+++ b/net/disk_cache/mem_backend_impl.h
@@ -34,25 +34,6 @@
   // Performs general initialization for this current instance of the cache.
   bool Init();
 
-  // Backend interface.
-  virtual int32 GetEntryCount() const;
-  virtual int OpenEntry(const std::string& key, Entry** entry,
-                        CompletionCallback* callback);
-  virtual int CreateEntry(const std::string& key, Entry** entry,
-                          CompletionCallback* callback);
-  virtual int DoomEntry(const std::string& key, CompletionCallback* callback);
-  virtual int DoomAllEntries(CompletionCallback* callback);
-  virtual int DoomEntriesBetween(const base::Time initial_time,
-                                 const base::Time end_time,
-                                 CompletionCallback* callback);
-  virtual int DoomEntriesSince(const base::Time initial_time,
-                               CompletionCallback* callback);
-  virtual int OpenNextEntry(void** iter, Entry** next_entry,
-                            CompletionCallback* callback);
-  virtual void EndEnumeration(void** iter);
-  virtual void GetStats(
-      std::vector<std::pair<std::string, std::string> >* stats) {}
-
   // Sets the maximum size for the total amount of data stored by this instance.
   bool SetMaxSize(int max_bytes);
 
@@ -77,7 +58,28 @@
   // MemEntryImpl to remove a child entry from the ranking list.
   void RemoveFromRankingList(MemEntryImpl* entry);
 
+  // Backend interface.
+  virtual int32 GetEntryCount() const;
+  virtual int OpenEntry(const std::string& key, Entry** entry,
+                        CompletionCallback* callback);
+  virtual int CreateEntry(const std::string& key, Entry** entry,
+                          CompletionCallback* callback);
+  virtual int DoomEntry(const std::string& key, CompletionCallback* callback);
+  virtual int DoomAllEntries(CompletionCallback* callback);
+  virtual int DoomEntriesBetween(const base::Time initial_time,
+                                 const base::Time end_time,
+                                 CompletionCallback* callback);
+  virtual int DoomEntriesSince(const base::Time initial_time,
+                               CompletionCallback* callback);
+  virtual int OpenNextEntry(void** iter, Entry** next_entry,
+                            CompletionCallback* callback);
+  virtual void EndEnumeration(void** iter);
+  virtual void GetStats(
+      std::vector<std::pair<std::string, std::string> >* stats) {}
+
  private:
+  typedef base::hash_map<std::string, MemEntryImpl*> EntryMap;
+
   // Old Backend interface.
   bool OpenEntry(const std::string& key, Entry** entry);
   bool CreateEntry(const std::string& key, Entry** entry);
@@ -97,8 +99,6 @@
   void AddStorageSize(int32 bytes);
   void SubstractStorageSize(int32 bytes);
 
-  typedef base::hash_map<std::string, MemEntryImpl*> EntryMap;
-
   EntryMap entries_;
   MemRankings rankings_;  // Rankings to be able to trim the cache.
   int32 max_size_;        // Maximum data size for this instance.
diff --git a/net/disk_cache/rankings.h b/net/disk_cache/rankings.h
index 6066fbf..baf777e 100644
--- a/net/disk_cache/rankings.h
+++ b/net/disk_cache/rankings.h
@@ -93,11 +93,12 @@
   // If we have multiple lists, we have to iterate through all at the same time.
   // This structure keeps track of where we are on the iteration.
   struct Iterator {
+    explicit Iterator(Rankings* rankings);
+    ~Iterator();
+
     List list;                     // Which entry was returned to the user.
     CacheRankingsBlock* nodes[3];  // Nodes on the first three lists.
     Rankings* my_rankings;
-    explicit Iterator(Rankings* rankings);
-    ~Iterator();
   };
 
   Rankings();
diff --git a/net/http/http_auth_cache.h b/net/http/http_auth_cache.h
index 707288c..b6c382c 100644
--- a/net/http/http_auth_cache.h
+++ b/net/http/http_auth_cache.h
@@ -28,6 +28,13 @@
  public:
   class Entry;
 
+  // Prevent unbounded memory growth. These are safeguards for abuse; it is
+  // not expected that the limits will be reached in ordinary usage.
+  // This also defines the worst-case lookup times (which grow linearly
+  // with number of elements in the cache).
+  enum { kMaxNumPathsPerRealmEntry = 10 };
+  enum { kMaxNumRealmEntries = 10 };
+
   HttpAuthCache();
   ~HttpAuthCache();
 
@@ -93,13 +100,6 @@
                             const std::string& scheme,
                             const std::string& auth_challenge);
 
-  // Prevent unbounded memory growth. These are safeguards for abuse; it is
-  // not expected that the limits will be reached in ordinary usage.
-  // This also defines the worst-case lookup times (which grow linearly
-  // with number of elements in the cache).
-  enum { kMaxNumPathsPerRealmEntry = 10 };
-  enum { kMaxNumRealmEntries = 10 };
-
  private:
   typedef std::list<Entry> EntryList;
   EntryList entries_;
@@ -108,6 +108,8 @@
 // An authentication realm entry.
 class HttpAuthCache::Entry {
  public:
+  ~Entry();
+
   const GURL& origin() const {
     return origin_;
   }
@@ -143,13 +145,13 @@
 
   void UpdateStaleChallenge(const std::string& auth_challenge);
 
-  ~Entry();
-
  private:
   friend class HttpAuthCache;
   FRIEND_TEST_ALL_PREFIXES(HttpAuthCacheTest, AddPath);
   FRIEND_TEST_ALL_PREFIXES(HttpAuthCacheTest, AddToExistingEntry);
 
+  typedef std::list<std::string> PathList;
+
   Entry();
 
   // Adds a path defining the realm's protection space. If the path is
@@ -172,7 +174,6 @@
   int nonce_count_;
 
   // List of paths that define the realm's protection space.
-  typedef std::list<std::string> PathList;
   PathList paths_;
 };
 
diff --git a/net/http/http_response_headers.h b/net/http/http_response_headers.h
index aacd35a..2b556b3 100644
--- a/net/http/http_response_headers.h
+++ b/net/http/http_response_headers.h
@@ -27,6 +27,16 @@
 class HttpResponseHeaders
     : public base::RefCountedThreadSafe<HttpResponseHeaders> {
  public:
+  // Persist options.
+  typedef int PersistOptions;
+  static const PersistOptions PERSIST_RAW = -1;  // Raw, unparsed headers.
+  static const PersistOptions PERSIST_ALL = 0;  // Parsed headers.
+  static const PersistOptions PERSIST_SANS_COOKIES = 1 << 0;
+  static const PersistOptions PERSIST_SANS_CHALLENGES = 1 << 1;
+  static const PersistOptions PERSIST_SANS_HOP_BY_HOP = 1 << 2;
+  static const PersistOptions PERSIST_SANS_NON_CACHEABLE = 1 << 3;
+  static const PersistOptions PERSIST_SANS_RANGES = 1 << 4;
+
   // Parses the given raw_headers.  raw_headers should be formatted thus:
   // includes the http status response line, each line is \0-terminated, and
   // it's terminated by an empty line (ie, 2 \0s in a row).
@@ -45,16 +55,6 @@
   // be passed to the pickle's various Read* methods.
   HttpResponseHeaders(const Pickle& pickle, void** pickle_iter);
 
-  // Persist options.
-  typedef int PersistOptions;
-  static const PersistOptions PERSIST_RAW = -1;  // Raw, unparsed headers.
-  static const PersistOptions PERSIST_ALL = 0;  // Parsed headers.
-  static const PersistOptions PERSIST_SANS_COOKIES = 1 << 0;
-  static const PersistOptions PERSIST_SANS_CHALLENGES = 1 << 1;
-  static const PersistOptions PERSIST_SANS_HOP_BY_HOP = 1 << 2;
-  static const PersistOptions PERSIST_SANS_NON_CACHEABLE = 1 << 3;
-  static const PersistOptions PERSIST_SANS_RANGES = 1 << 4;
-
   // Appends a representation of this object to the given pickle.
   // The options argument can be a combination of PersistOptions.
   void Persist(Pickle* pickle, PersistOptions options);
@@ -253,6 +253,19 @@
 
   typedef base::hash_set<std::string> HeaderSet;
 
+  // The members of this structure point into raw_headers_.
+  struct ParsedHeader {
+    std::string::const_iterator name_begin;
+    std::string::const_iterator name_end;
+    std::string::const_iterator value_begin;
+    std::string::const_iterator value_end;
+
+    // A header "continuation" contains only a subsequent value for the
+    // preceding header.  (Header values are comma separated.)
+    bool is_continuation() const { return name_begin == name_end; }
+  };
+  typedef std::vector<ParsedHeader> HeaderList;
+
   HttpResponseHeaders();
   ~HttpResponseHeaders();
 
@@ -319,19 +332,6 @@
   // Adds the set of content range response headers.
   static void AddHopContentRangeHeaders(HeaderSet* header_names);
 
-  // The members of this structure point into raw_headers_.
-  struct ParsedHeader {
-    std::string::const_iterator name_begin;
-    std::string::const_iterator name_end;
-    std::string::const_iterator value_begin;
-    std::string::const_iterator value_end;
-
-    // A header "continuation" contains only a subsequent value for the
-    // preceding header.  (Header values are comma separated.)
-    bool is_continuation() const { return name_begin == name_end; }
-  };
-  typedef std::vector<ParsedHeader> HeaderList;
-
   // We keep a list of ParsedHeader objects.  These tell us where to locate the
   // header-value pairs within raw_headers_.
   HeaderList parsed_;
diff --git a/net/http/http_response_info.h b/net/http/http_response_info.h
index cd66982c..4fbea3d 100644
--- a/net/http/http_response_info.h
+++ b/net/http/http_response_info.h
@@ -28,6 +28,14 @@
   // Even though we could get away with the copy ctor and default operator=,
   // that would prevent us from doing a bunch of forward declaration.
 
+  // Initializes from the representation stored in the given pickle.
+  bool InitFromPickle(const Pickle& pickle, bool* response_truncated);
+
+  // Call this method to persist the response info.
+  void Persist(Pickle* pickle,
+               bool skip_transient_headers,
+               bool response_truncated) const;
+
   // The following is only defined if the request_time member is set.
   // If this response was resurrected from cache, then this bool is set, and
   // request_time may corresponds to a time "far" in the past.  Note that
@@ -81,14 +89,6 @@
 
   // Any metadata asociated with this resource's cached data.
   scoped_refptr<IOBufferWithSize> metadata;
-
-  // Initializes from the representation stored in the given pickle.
-  bool InitFromPickle(const Pickle& pickle, bool* response_truncated);
-
-  // Call this method to persist the response info.
-  void Persist(Pickle* pickle,
-               bool skip_transient_headers,
-               bool response_truncated) const;
 };
 
 }  // namespace net
diff --git a/net/http/http_util.cc b/net/http/http_util.cc
index 1f57d70..bf561362 100644
--- a/net/http/http_util.cc
+++ b/net/http/http_util.cc
@@ -806,6 +806,8 @@
       value_is_quoted_(false) {
 }
 
+HttpUtil::NameValuePairsIterator::~NameValuePairsIterator() {}
+
 // We expect properties to be formatted as one of:
 //   name="value"
 //   name='value'
diff --git a/net/http/http_util.h b/net/http/http_util.h
index 2f5bd85..3da1635 100644
--- a/net/http/http_util.h
+++ b/net/http/http_util.h
@@ -283,6 +283,7 @@
     NameValuePairsIterator(std::string::const_iterator begin,
                            std::string::const_iterator end,
                            char delimiter);
+    ~NameValuePairsIterator();
 
     // Advances the iterator to the next pair, if any.  Returns true if there
     // is a next pair.  Use name* and value* methods to access the resultant
diff --git a/net/proxy/proxy_service.h b/net/proxy/proxy_service.h
index f9424d7..75ac3c98 100644
--- a/net/proxy/proxy_service.h
+++ b/net/proxy/proxy_service.h
@@ -277,6 +277,9 @@
                               int result_code,
                               const BoundNetLog& net_log);
 
+  // Start initialization using |fetched_config_|.
+  void InitializeUsingLastFetchedConfig();
+
   // NetworkChangeNotifier::Observer
   // When this is called, we re-fetch PAC scripts and re-run WPAD.
   virtual void OnIPAddressChanged();
@@ -284,9 +287,6 @@
   // ProxyConfigService::Observer
   virtual void OnProxyConfigChanged(const ProxyConfig& config);
 
-  // Start initialization using |fetched_config_|.
-  void InitializeUsingLastFetchedConfig();
-
   scoped_ptr<ProxyConfigService> config_service_;
   scoped_ptr<ProxyResolver> resolver_;
 
diff --git a/net/socket/dns_cert_provenance_checker.h b/net/socket/dns_cert_provenance_checker.h
index 810e272..74e8768 100644
--- a/net/socket/dns_cert_provenance_checker.h
+++ b/net/socket/dns_cert_provenance_checker.h
@@ -27,10 +27,10 @@
         const std::vector<std::string>& der_certs) = 0;
   };
 
-  virtual void Shutdown() = 0;
-
   virtual ~DnsCertProvenanceChecker();
 
+  virtual void Shutdown() = 0;
+
   // DoAsyncVerification starts an asynchronous check for the given certificate
   // chain. It must be run on the network thread.
   virtual void DoAsyncVerification(
diff --git a/net/socket/socket_test_util.h b/net/socket/socket_test_util.h
index 73dd07c..91f8fc9 100644
--- a/net/socket/socket_test_util.h
+++ b/net/socket/socket_test_util.h
@@ -168,12 +168,6 @@
                            MockWrite* writes, size_t writes_count);
   virtual ~StaticSocketDataProvider();
 
-  // SocketDataProvider methods:
-  virtual MockRead GetNextRead();
-  virtual MockWriteResult OnWrite(const std::string& data);
-  virtual void Reset();
-  virtual void CompleteRead() {}
-
   // These functions get access to the next available read and write data.
   const MockRead& PeekRead() const;
   const MockWrite& PeekWrite() const;
@@ -188,6 +182,12 @@
   bool at_read_eof() const { return read_index_ >= read_count_; }
   bool at_write_eof() const { return write_index_ >= write_count_; }
 
+  // SocketDataProvider methods:
+  virtual MockRead GetNextRead();
+  virtual MockWriteResult OnWrite(const std::string& data);
+  virtual void Reset();
+  virtual void CompleteRead() {}
+
  private:
   MockRead* reads_;
   size_t read_index_;
diff --git a/net/socket_stream/socket_stream.h b/net/socket_stream/socket_stream.h
index f4855433..b2afd1b 100644
--- a/net/socket_stream/socket_stream.h
+++ b/net/socket_stream/socket_stream.h
@@ -156,6 +156,11 @@
   Delegate* delegate_;
 
  private:
+  friend class WebSocketThrottleTest;
+
+  typedef std::map<const void*, linked_ptr<UserData> > UserDataMap;
+  typedef std::deque< scoped_refptr<IOBufferWithSize> > PendingDataQueue;
+
   class RequestHeaders : public IOBuffer {
    public:
     RequestHeaders() : IOBuffer() {}
@@ -212,9 +217,8 @@
     kSOCKSProxy,  // If using a SOCKS proxy
   };
 
-  typedef std::deque< scoped_refptr<IOBufferWithSize> > PendingDataQueue;
-
-  friend class WebSocketThrottleTest;
+  // Use the same number as HttpNetworkTransaction::kMaxHeaderBufSize.
+  enum { kMaxTunnelResponseHeadersSize = 32768 };  // 32 kilobytes.
 
   // Copies the given addrinfo list in |addresses_|.
   // Used for WebSocketThrottleTest.
@@ -269,7 +273,6 @@
   int max_pending_send_allowed_;
   scoped_refptr<URLRequestContext> context_;
 
-  typedef std::map<const void*, linked_ptr<UserData> > UserDataMap;
   UserDataMap user_data_;
 
   State next_state_;
@@ -295,9 +298,6 @@
   int tunnel_response_headers_capacity_;
   int tunnel_response_headers_len_;
 
-  // Use the same number as HttpNetworkTransaction::kMaxHeaderBufSize.
-  enum { kMaxTunnelResponseHeadersSize = 32768 };  // 32 kilobytes.
-
   scoped_ptr<SingleRequestHostResolver> resolver_;
   AddressList addresses_;
   scoped_ptr<ClientSocket> socket_;
diff --git a/net/spdy/spdy_protocol.h b/net/spdy/spdy_protocol.h
index 9834a11..2389152e 100644
--- a/net/spdy/spdy_protocol.h
+++ b/net/spdy/spdy_protocol.h
@@ -302,9 +302,6 @@
 
 // A structure for the 8 bit flags and 24 bit ID fields.
 union SettingsFlagsAndId {
-  uint8 flags_[4];  // 8 bits
-  uint32 id_;       // 24 bits
-
   SettingsFlagsAndId(uint32 val) : id_(val) {}
   uint8 flags() const { return flags_[0]; }
   void set_flags(uint8 flags) { flags_[0] = flags; }
@@ -314,6 +311,9 @@
     id = htonl(id & kSettingsIdMask);
     id_ = flags() | id;
   }
+
+  uint8 flags_[4];  // 8 bits
+  uint32 id_;       // 24 bits
 };
 
 #pragma pack(pop)
diff --git a/net/url_request/url_request_job_metrics.h b/net/url_request/url_request_job_metrics.h
index 15bbaf6..6552026 100644
--- a/net/url_request/url_request_job_metrics.h
+++ b/net/url_request/url_request_job_metrics.h
@@ -23,6 +23,9 @@
   URLRequestJobMetrics();
   ~URLRequestJobMetrics();
 
+  // Append the text report of the frame loading to the input string.
+  void AppendText(std::wstring* text);
+
   // The original url the job has been created for.
   scoped_ptr<GURL> original_url_;
 
@@ -44,9 +47,6 @@
 
   // Final status of the job.
   bool success_;
-
-  // Append the text report of the frame loading to the input string.
-  void AppendText(std::wstring* text);
 };
 
 }  // namespace net
diff --git a/net/websockets/websocket_handshake_handler.cc b/net/websockets/websocket_handshake_handler.cc
index 57f01e98..734b93ec 100644
--- a/net/websockets/websocket_handshake_handler.cc
+++ b/net/websockets/websocket_handshake_handler.cc
@@ -284,6 +284,8 @@
     : original_header_length_(0) {
 }
 
+WebSocketHandshakeResponseHandler::~WebSocketHandshakeResponseHandler() {}
+
 size_t WebSocketHandshakeResponseHandler::ParseRawResponse(
     const char* data, int length) {
   DCHECK_GT(length, 0);
diff --git a/net/websockets/websocket_handshake_handler.h b/net/websockets/websocket_handshake_handler.h
index 54554449..b191dba 100644
--- a/net/websockets/websocket_handshake_handler.h
+++ b/net/websockets/websocket_handshake_handler.h
@@ -71,7 +71,7 @@
 class WebSocketHandshakeResponseHandler {
  public:
   WebSocketHandshakeResponseHandler();
-  ~WebSocketHandshakeResponseHandler() {}
+  ~WebSocketHandshakeResponseHandler();
 
   // Parses WebSocket handshake response from WebSocket server.
   // Returns number of bytes in |data| used for WebSocket handshake response
diff --git a/net/websockets/websocket_job.h b/net/websockets/websocket_job.h
index f19653d2..d9a6e71 100644
--- a/net/websockets/websocket_job.h
+++ b/net/websockets/websocket_job.h
@@ -39,10 +39,11 @@
     CLOSING = 2,
     CLOSED = 3,
   };
-  static void EnsureInit();
 
   explicit WebSocketJob(SocketStream::Delegate* delegate);
 
+  static void EnsureInit();
+
   State state() const { return state_; }
   virtual void Connect();
   virtual bool SendData(const char* data, int len);