Create the Next Generation of depot_tools. Eh.
Review URL: http://codereview.chromium.org/92087

git-svn-id: svn://svn.chromium.org/chrome/trunk/tools/depot_tools@14349 0039d316-1c4b-4281-b951-d872f2087c98

maruel@google.com committed 16 years ago
commit fb2b8eb2e2
41 changed files with 11,450 additions and 0 deletions
 1. LICENSE (+27 -0)
 2. README (+39 -0)
 3. README.git-cl (+52 -0)
 4. README.git-cl.codereview (+99 -0)
 5. bootstrap/gclient.bat (+21 -0)
 6. bootstrap/gclient.sh (+18 -0)
 7. bootstrap/win/7z.copying.txt (+504 -0)
 8. bootstrap/win/7z.license.txt (+30 -0)
 9. bootstrap/win/7z.readme.txt (+42 -0)
10. bootstrap/win/7za.exe (BIN)
11. bootstrap/win/README.google (+16 -0)
12. bootstrap/win/python.bat (+1 -0)
13. bootstrap/win/svn.bat (+1 -0)
14. bootstrap/win/wget.exe (BIN)
15. bootstrap/win/win_tools.bat (+65 -0)
16. chrome-update-create-task.bat (+53 -0)
17. chrome-update.bat (+10 -0)
18. chrome-update.py (+170 -0)
19. cpplint.py (+2725 -0)
20. gcl (+5 -0)
21. gcl.bat (+1 -0)
22. gcl.py (+1122 -0)
23. gclient (+21 -0)
24. gclient.bat (+36 -0)
25. gclient.py (+1649 -0)
26. git-cl.py (+682 -0)
27. git-try.py (+184 -0)
28. hammer (+9 -0)
29. hammer.bat (+23 -0)
30. presubmit.py (+715 -0)
31. presubmit_canned_checks.py (+102 -0)
32. profile.xml (+8 -0)
33. revert (+5 -0)
34. revert.bat (+1 -0)
35. revert.py (+284 -0)
36. tests/abandon.sh (+41 -0)
37. tests/basic.sh (+62 -0)
38. tests/presubmit_unittest.py (+699 -0)
39. tests/test-lib.sh (+50 -0)
40. trychange.py (+505 -0)
41. upload.py (+1373 -0)

+ 27 - 0
LICENSE

@@ -0,0 +1,27 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//    * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//    * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//    * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 39 - 0
README

@@ -0,0 +1,39 @@
+This package contains tools for working with Chromium development.
+
+The "gclient" wrapper knows how to keep this repository updated to
+the latest versions of these tools as found at:
+
+    http://src.chromium.org/svn/trunk/tools/depot_tools
+
+This package contains:
+
+  chrome-update-create-task.bat
+    Creates a scheduled task to do an automatic local chromium build every day.
+
+  gcl
+    A tool for uploading and managing code reviews on the Chromium
+    project, using the Rietveld code review tool.  More info at:
+    http://code.google.com/p/rietveld/
+   
+  gclient
+    A script for managing a workspace with modular dependencies that
+    are each checked out independently from different repositories.
+    More info at:
+    http://code.google.com/p/gclient/
+
+  hammer
+    A wrapper script for building Chromium with the SCons software
+    construction tool.  More info at:
+    http://www.scons.org/
+
+  revert
+    A small tool to quickly revert a change.
+
+
+Note: svn and python will be installed automatically if not accessible (on
+Windows only).
+
+To update this distribution manually, run bootstrap\update.bat on Windows,
+or bootstrap/update.sh on Linux or Mac.
+
+To disable automatic updating, set the environment variable DEPOT_TOOLS_UPDATE=0
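
For context on the gclient entry above: a .gclient file is itself a small Python fragment defining a "solutions" list (chrome-update.py below reads it with execfile()). A minimal sketch, assuming a single Chromium "src" solution; the URL and fields here are illustrative:

    # .gclient -- a Python fragment consumed by the gclient tooling.
    solutions = [
      {
        "name": "src",
        # An "@<revision>" suffix on the url pins the checkout, which is
        # what SetRevisionForUpdate() in chrome-update.py relies on.
        "url": "http://src.chromium.org/svn/trunk/src",
      },
    ]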

+ 52 - 0
README.git-cl

@@ -0,0 +1,52 @@
+# git-cl -- a git-command for integrating reviews on Rietveld
+# Copyright (C) 2008 Evan Martin <martine@danga.com>
+
+== Background
+Rietveld, also known as http://codereview.appspot.com, is a nice tool
+for code reviews.  You upload a patch (and some other data) and it lets
+others comment on your patch.
+
+For more on how this all works conceptually, please see README.codereview.
+The remainder of this document is the nuts and bolts of using git-cl.
+
+== Install
+Copy (symlink) it into your path somewhere, along with Rietveld
+upload.py.
+
+== Setup
+Run this from your git checkout and answer some questions:
+$ git cl config
+
+== How to use it
+Make a new branch.  Write some code.  Commit it locally.  Send it for
+review:
+$ git cl upload
+By default, it diffs against whatever branch the current branch is
+tracking (see "git checkout --track").  An optional last argument is
+passed to "git diff", allowing reviews against other heads.
+
+You'll be asked some questions, and the review issue number will be
+associated with your current git branch, so subsequent calls to upload
+will update that review rather than making a new one.
+
+== git-svn integration
+Review looks good?  Commit the code:
+$ git cl dcommit
+This does a git-svn dcommit, with a twist: all changes in the diff
+will be squashed into a single commit, and the description of the commit
+is taken directly from the Rietveld description.  This command also accepts
+arguments to "git diff", much like upload.
+Try "git cl dcommit --help" for more options.
+
+== Extra commands
+Print some status info:
+$ git cl status
+
+Edit the issue association on the current branch:
+$ git cl issue 1234
+
+Patch in a review:
+$ git cl patch <url to full patch>
+Try "git cl patch --help" for more options.
+
+vim: tw=72 :
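
The issue-to-branch association described above maps naturally onto per-branch "git config" keys; a minimal sketch, assuming a hypothetical key name (git-cl.py's actual storage may differ):

    import subprocess

    def GetIssue(branch):
      """Returns the issue number bound to a branch, or None if unset."""
      # "rietveldissue" is an illustrative key name, not a confirmed one.
      key = "branch.%s.rietveldissue" % branch
      proc = subprocess.Popen(["git", "config", key], stdout=subprocess.PIPE)
      out = proc.communicate()[0].strip()
      if proc.returncode != 0 or not out:
        return None
      return int(out)

    def SetIssue(branch, issue):
      """Binds an issue number to a branch so later uploads reuse it."""
      key = "branch.%s.rietveldissue" % branch
      subprocess.call(["git", "config", key, str(issue)])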

+ 99 - 0
README.git-cl.codereview

@@ -0,0 +1,99 @@
+The git-cl README describes the git-cl command set.  This document
+describes how code review and git work together in general, intended
+for people familiar with git but unfamiliar with the code review
+process supported by Rietveld.
+
+== Concepts and terms
+A Rietveld review is for discussion of a single change or patch.  You
+upload a proposed change, the reviewer comments on your change, and
+then you can upload a revised version of your change.  Rietveld stores
+the history of uploaded patches as well as the comments, and can
+compute diffs in between these patches.  The history of a patch is
+very much like a small branch in git, but since Rietveld is
+VCS-agnostic the concepts don't map perfectly.  The identifier for a
+single review+patches+comments in Rietveld is called an "issue".
+
+Rietveld provides a basic uploader that understands git.  This program
+is used by git-cl, and is included in the git-cl repo as upload.py.
+
+== Basic interaction with git
+The fundamental problem you encounter when you try to mix git and code
+review is that with git it's nice to commit code locally, while during
+a code review you're often requested to change something about your
+code.  There are a few different ways you can handle this workflow
+with git:
+
+1) Rewriting a single commit.  Say the origin commit is O, and you
+   commit your initial work in a commit A, making your history like
+   O--A.  After review comments, you commit --amend, effectively
+   erasing A and making a new commit A', so history is now O--A'.
+   (Equivalently, you can use git reset --soft or git rebase -i.)
+
+2) Writing follow-up commits.  Initial work is again in A, and after
+   review comments, you write a new commit B so your history looks
+   like O--A--B.  When you upload the revised patch, you upload the
+   diff of O..B, not A..B; you always upload the full diff of what
+   you're proposing to change.
+
+The Rietveld patch uploader just takes arguments to "git diff", so
+either of the above workflows work fine.  If all you want to do is
+upload a patch, you can use the upload.py provided by Rietveld with
+arguments like this:
+
+  upload.py --server server.com <args to "git diff">
+
+The first time you upload, it creates a new issue; for follow-ups on
+the same issue, you need to provide the issue number:
+
+  upload.py --server server.com --issue 1234 <args to "git diff">
+
+== git-cl to the rescue
+git-cl simplifies the above in the following ways:
+
+1) "git cl config" puts a persistent --server setting in your .git/config.
+
+2) The first time you upload an issue, the issue number is associated with
+   the current *branch*.  If you upload again, it will upload on the same
+   issue.  (Note that this association is tied to a branch, not a commit,
+   which means you need a separate branch per review.)
+
+3) If your branch is "tracking" (in the "git checkout --track" sense)
+   another one (like origin/master), calls to "git cl upload" will
+   diff against that branch by default.  (You can still pass arguments
+   to "git diff" on the command line, if necessary.)
+
+In the common case, this means that calling simply "git cl upload"
+will always upload the correct diff to the correct place.
+
+== Patch series
+The above is all you need to know for working on a single patch.
+
+Things get much more complicated when you have a series of commits
+that you want to get reviewed.  Say your history looks like
+O--A--B--C.  If you want to upload that as a single review, everything
+works just as above.
+
+But what if you upload each of A, B, and C as separate reviews?
+What if you then need to change A?
+
+1) One option is rewriting history: write a new commit A', then use
+   git rebase -i to insert it into the history as O--A--A'--B--C and
+   squash A and A' together.  This is sometimes not possible if B and
+   C have touched some lines affected by A'.
+ 
+2) Another option, and the one espoused by software like topgit, is for
+   you to have separate branches for A, B, and C, and after writing A'
+   you merge it into each of those branches.  (topgit automates this
+   merging process.)  This is also what is recommended by git-cl, which
+   likes having different branch identifiers to hang the issue number
+   off of.  Your history ends up looking like:
+
+       O---A---B---C
+            \   \   \
+             A'--B'--C'
+
+   Which is ugly, but it accurately tracks the real history of your work,
+   and can be thrown away at the end by committing A+A' as a single
+   "squash" commit.
+
+In practice, this comes up pretty rarely.  Suggestions for better workflows
+are welcome.
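
Putting the above together, the upload step reduces to a small dispatch: reuse the stored issue number when the branch has one, create a new issue otherwise, and forward everything else verbatim to "git diff". A hedged sketch (the helper below is illustrative, not git-cl.py's actual code):

    import subprocess

    def UploadReview(server, issue, diff_args):
      """Runs Rietveld's upload.py; creates a new issue when issue is None."""
      cmd = ["python", "upload.py", "--server", server]
      if issue is not None:
        # Follow-up upload: attach the revised patch to the existing review.
        cmd += ["--issue", str(issue)]
      # Remaining arguments go straight to "git diff", so the full O..B
      # diff (not just the latest commit) is what gets uploaded.
      cmd += diff_args
      return subprocess.call(cmd)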

+ 21 - 0
bootstrap/gclient.bat

@@ -0,0 +1,21 @@
+@echo off
+:: Copyright (c) 2009 The Chromium Authors. All rights reserved.
+:: Use of this source code is governed by a BSD-style license that can be
+:: found in the LICENSE file.
+
+:: This batch file will try to sync the root directory and call back gclient.
+
+setlocal
+
+:: Shall skip automatic update?
+IF "%DEPOT_TOOLS_UPDATE%" == "0" GOTO gclient
+
+:: We can't sync if ..\.svn\. doesn't exist.
+IF NOT EXIST "%~dp0..\.svn" GOTO gclient
+
+:: Sync the .. directory to update the bootstrap at the same time.
+call svn -q up "%~dp0.."
+
+:gclient
+:: Defer control.
+python "%~dp0\..\gclient.py" %*

+ 18 - 0
bootstrap/gclient.sh

@@ -0,0 +1,18 @@
+#!/bin/sh
+# Copyright (c) 2009 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This script will try to sync the root and bootstrap directories.
+
+base_dir=$(dirname "$0")
+
+# Skip if $DEPOT_TOOLS_UPDATE==0 or ../.svn/. doesn't exist.
+if [ "X$DEPOT_TOOLS_UPDATE" != "X0" -a -e "$base_dir/../.svn" ]
+then
+  # Update the root directory.
+  svn -q up "$base_dir/.."
+fi
+
+exec python "$base_dir/../gclient.py" "$@"
+

+ 504 - 0
bootstrap/win/7z.copying.txt

@@ -0,0 +1,504 @@
+      GNU LESSER GENERAL PUBLIC LICENSE
+           Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+          Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+      GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+          NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+         END OF TERMS AND CONDITIONS
+
+           How to Apply These Terms to Your New Libraries
+
+  If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change.  You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+  To apply these terms, attach the following notices to the library.  It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
+
+

+ 30 - 0
bootstrap/win/7z.license.txt

@@ -0,0 +1,30 @@
+      7-Zip Command line version
+      ~~~~~~~~~~~~~~~~~~~~~~~~~~
+      License for use and distribution
+      ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+      7-Zip Copyright (C) 1999-2009 Igor Pavlov.
+
+      7za.exe is distributed under the GNU LGPL license
+
+      Notes: 
+        You can use 7-Zip on any computer, including a computer in a commercial 
+        organization. You don't need to register or pay for 7-Zip.
+
+
+      GNU LGPL information
+      --------------------
+
+        This library is free software; you can redistribute it and/or
+        modify it under the terms of the GNU Lesser General Public
+        License as published by the Free Software Foundation; either
+        version 2.1 of the License, or (at your option) any later version.
+
+        This library is distributed in the hope that it will be useful,
+        but WITHOUT ANY WARRANTY; without even the implied warranty of
+        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+        Lesser General Public License for more details.
+
+        You should have received a copy of the GNU Lesser General Public
+        License along with this library; if not, write to the Free Software
+        Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

+ 42 - 0
bootstrap/win/7z.readme.txt

@@ -0,0 +1,42 @@
+7-Zip Command line version 4.65
+-------------------------------
+
+7-Zip is a file archiver with high compression ratio.
+7za.exe is a standalone command line version of 7-Zip.
+
+7-Zip Copyright (C) 1999-2009 Igor Pavlov.
+
+Features of 7za.exe: 
+  - High compression ratio in new 7z format
+  - Supported formats:
+      - Packing / unpacking: 7z, ZIP, GZIP, BZIP2 and TAR 
+      - Unpacking only: Z
+  - Highest compression ratio for ZIP and GZIP formats.
+  - Fast compression and decompression
+  - Strong AES-256 encryption in 7z and ZIP formats.
+
+7za.exe is a free software distributed under the GNU LGPL.
+Read license.txt for more information.
+
+Source code of 7za.exe and 7-Zip can be found at
+http://www.7-zip.org/
+
+7za.exe can work in Windows 95/98/ME/NT/2000/XP/2003/Vista.
+
+There is also port of 7za.exe for POSIX systems like Unix (Linux, Solaris, OpenBSD, 
+FreeBSD, Cygwin, AIX, ...), MacOS X and BeOS:
+
+http://p7zip.sourceforge.net/
+
+
+  This distributive packet contains the following files:
+
+  7za.exe       - 7-Zip standalone command line version.
+  readme.txt    - This file.
+  copying.txt   - GNU LGPL license.
+  license.txt   - License information.
+  7-zip.chm     - User's Manual in HTML Help format.
+
+
+---
+End of document

BIN
bootstrap/win/7za.exe


+ 16 - 0
bootstrap/win/README.google

@@ -0,0 +1,16 @@
+Including the following third parties:
+
+7zip version 4.65
+7za.exe is a free software distributed under the GNU LGPL.
+Read license.txt for more information.
+
+Source code of 7za.exe and 7-Zip can be found at
+http://www.7-zip.org/
+
+Note: packed with UPX to reduce code size.
+
+
+wget 1.11.4 from http://www.gnu.org/software/wget/
+
+Note: local compile without openssl support to reduce code size.
+Note: packed with UPX to reduce code size.

+ 1 - 0
bootstrap/win/python.bat

@@ -0,0 +1 @@
+@"%~dp0python\python.exe" %*

+ 1 - 0
bootstrap/win/svn.bat

@@ -0,0 +1 @@
+@"%~dp0svn\svn.exe" %*

BIN
bootstrap/win/wget.exe


+ 65 - 0
bootstrap/win/win_tools.bat

@@ -0,0 +1,65 @@
+@echo off
+:: Copyright (c) 2009 The Chromium Authors. All rights reserved.
+:: Use of this source code is governed by a BSD-style license that can be
+:: found in the LICENSE file.
+
+:: This script will try to find if svn and python are accessible and, if not,
+:: it will try to download them and 'install' them in depot_tools.
+
+SETLOCAL
+
+set ROOT_URL=http://src.chromium.org/svn/trunk/tools
+set ROOT_DIR=%~dp0..\..
+
+:: If the batch file exists, skip the svn check.
+if exist "%ROOT_DIR%\svn.bat" goto :PYTHON
+call svn --version 2>nul 1>nul
+if errorlevel 1 call :C_SVN
+
+:PYTHON
+:: If the batch file exists, skip the python check.
+if exist "%ROOT_DIR%\python.bat" goto :EOF
+call python --version 2>nul 1>nul
+if errorlevel 1 call :C_PYTHON
+
+:: We are done.
+goto :EOF
+
+
+:C_SVN
+echo Installing subversion ...
+:: svn is not accessible; check it out and create 'proxy' files.
+call "%~dp0wget" -q %ROOT_URL%/third_party/svn.7z -O "%~dp0svn.7z"
+if errorlevel 1 goto :SVN_FAIL
+call "%~dp07za" x "%~dp0svn.7z" %ROOT_DIR%
+if errorlevel 1 goto :SVN_FAIL
+del "%~dp0svn.7z"
+:: Create the batch file.
+copy "%~dp0svn.bat" "%ROOT_DIR%"
+goto :EOF
+
+
+:SVN_FAIL
+echo Failed to checkout svn automatically.
+echo Please visit http://subversion.tigris.org to download the latest subversion client
+echo before continuing.
+echo You can also get the "prebaked" version used at %ROOT_URL%/third_party/
+:: Still try python.
+goto :C_PYTHON
+
+
+:C_PYTHON
+echo Installing python ...
+call svn co %ROOT_URL%/third_party/python "%ROOT_DIR%\python"
+if errorlevel 1 goto :PYTHON_FAIL
+:: Create the batch file.
+copy "%~dp0python.bat" "%ROOT_DIR%"
+goto :EOF
+
+
+:PYTHON_FAIL
+echo Failed to checkout python automatically.
+echo Please visit http://python.org to download the latest python 2.x client before
+echo continuing.
+echo You can also get the "prebaked" version used at %ROOT_URL%/third_party/
+goto :EOF

+ 53 - 0
chrome-update-create-task.bat

@@ -0,0 +1,53 @@
+@echo off
+:: Copyright (c) 2009 The Chromium Authors. All rights reserved.
+:: Use of this source code is governed by a BSD-style license that can be
+:: found in the LICENSE file.
+
+setlocal
+
+:: This script will create a scheduled task to run chrome-update every day
+:: at the time you specify.  This script expects to be live in
+:: depot_tools\latest.
+::
+:: Usage: this-script <time to run task> <path to chrome trunk>
+
+set Out=%USERPROFILE%\chrome-update-task.bat
+set TaskTime=%1
+set Trunk=%~f2
+
+if not exist "%Trunk%" (
+  echo Usage: %~n0 ^<time^> ^<c:\path\to\chrome\trunk^>
+  echo ^<time^> is the time in HH:MM:SS format at which to run the task.
+  echo Example: %~n0 02:00:00 c:\src\chrome\trunk
+  exit 1
+)
+
+if not exist "%Out%" goto CreateScript
+
+echo WARNING: %Out% already exists.
+set Choice=
+set /P Choice=Overwrite file [Y/N]?
+if /I not "%Choice%"=="y" goto CreateTask
+
+:CreateScript
+
+echo.
+echo Creating %Out%
+
+echo>"%Out%" @echo off
+echo>>"%Out%" call "%~dp0\bootstrap\update.bat"
+echo>>"%Out%" "%~dp0\chrome-update.bat" "%Trunk%" ^> "%Trunk%\chrome-update-results.txt" 
+
+:CreateTask
+
+echo.
+echo ***********************************************************************
+echo Creating a Scheduled Task to run chrome-update each day at %TaskTime%.
+echo The batch file being run will live at %Out%.
+echo.
+echo WARNING: The password you enter will be displayed in cleartext.
+echo If you're paranoid, you can enter blank here and then fix the password
+echo by editing the scheduled task manually from the Control Panel.
+echo ***********************************************************************
+echo.
+schtasks /create /tn chrome-update /tr "\"%Out%\"" /sc daily /st %TaskTime%

+ 10 - 0
chrome-update.bat

@@ -0,0 +1,10 @@
+@echo off
+
+:: This batch file assumes that the correct version of python can be found in
+:: the current directory, and that you have Visual Studio 8 installed in the
+:: default location.
+
+setlocal
+call vcvars32.bat
+
+python "%~dp0chrome-update.py" %*

+ 170 - 0
chrome-update.py

@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# Copyright (c) 2009 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Author: mpcomplete
+#
+# This script updates and does a clean build of chrome for you.
+# Usage: python chrome-update.py C:\path\to\chrome\trunk
+#
+# It assumes the following:
+# - You have gclient.bat and devenv.com in your path (use the wrapper batch
+#   file to ensure this).
+
+import sys
+import os
+import subprocess
+import httplib
+import re
+import shutil
+import optparse
+
+def Message(str):
+  """Prints a status message."""
+  print "[chrome-update]", str
+
+def FixupPath(path):
+  """Returns the OS-ified version of a windows path."""
+  return os.path.sep.join(path.split("\\"))
+
+def GetRevision():
+  """Returns the revision number of the last build that was archived, or
+  None on failure."""
+  HOST = "build.chromium.org"
+  PATH = "/buildbot/continuous/LATEST/REVISION"
+  EXPR = r"(\d+)"
+
+  connection = httplib.HTTPConnection(HOST)
+  connection.request("GET", PATH)
+  response = connection.getresponse()
+  text = response.read()
+  match = re.search(EXPR, text)
+  if match:
+    return int(match.group(1))
+  return None
+
+def SetRevisionForUpdate(chrome_root):
+  """Prepares environment so gclient syncs to a good revision, if possible."""
+  # Find a buildable revision.
+  rev = GetRevision()
+  if rev == None:
+    Message("WARNING: Failed to find a buildable revision.  Syncing to trunk.")
+    return "trunk"
+
+  # Read the .gclient file.
+  gclient_file = chrome_root + FixupPath("\\.gclient")
+  if not os.path.exists(gclient_file):
+    Message("WARNING: Failed to find .gclient file.  Syncing to trunk.")
+    return "trunk"
+  scope = {}
+  execfile(gclient_file, scope)
+  solutions = scope["solutions"]
+
+  # Edit the url of the chrome 'src' solution, unless the user wants a
+  # specific revision.
+  for solution in solutions:
+    if solution["name"] == "src":
+      splitter = solution["url"].split("@")
+      if len(splitter) == 1:
+        solution["url"] = splitter[0] + "@" + str(rev)
+      else:
+        rev = int(splitter[1])
+      break
+
+  # Write out the new .gclient file.
+  gclient_override = gclient_file + "-update-chrome"
+  f = open(gclient_override, "w")
+  f.write("solutions = " + str(solutions))
+  f.close()
+
+  # Set the env var that the gclient tool looks for.
+  os.environ["GCLIENT_FILE"] = gclient_override
+  return rev
+
+def DoUpdate(chrome_root):
+  """gclient sync to the latest build."""
+  # gclient sync
+  rev = SetRevisionForUpdate(chrome_root)
+
+  cmd = ["gclient.bat", "sync"]
+  Message("Updating to %s: %s" % (rev, cmd))
+  sys.stdout.flush()
+  return subprocess.call(cmd, cwd=chrome_root)
+
+def DoClean(chrome_root, type):
+  """Clean our build dir."""
+  # rm -rf src/chrome/Debug
+  rv = [0]
+  def onError(func, path, excinfo):
+    Message("Couldn't remove '%s': %s" % (path, excinfo))
+    rv[0] = 1  # Assign through the list so the nested handler can flag failure.
+
+  build_path = chrome_root + FixupPath("\\src\\chrome\\" + type)
+  Message("Cleaning: %s" % build_path)
+  shutil.rmtree(build_path, False, onError)
+  return rv[0]
+
+def DoBuild(chrome_root, chrome_sln, clean, type):
+  """devenv /build what we just checked out."""
+  if clean:
+    rv = DoClean(chrome_root, type)
+    if rv != 0:
+      Message("WARNING: Clean failed.  Doing a build without clean.")
+
+  # devenv chrome.sln /build Debug
+  cmd = ["devenv.com", chrome_sln, "/build", type]
+
+  Message("Building: %s" % cmd)
+  sys.stdout.flush()
+  return subprocess.call(cmd, cwd=chrome_root)
+
+def Main():
+  parser = optparse.OptionParser()
+  parser.add_option("", "--clean", action="store_true", default=False,
+                    help="wipe Debug output directory before building")
+  parser.add_option("", "--solution", default="src\\chrome\\chrome.sln",
+                    help="path to the .sln file to build (absolute, or "
+                         "relative to chrome trunk)")
+  parser.add_option("", "--release", action="store_true", default=False,
+                    help="build the release configuration in addition to the "
+                         "debug configuration.")
+  parser.add_option("", "--nosync", action="store_true", default=False,
+                    help="do not sync before building")
+  parser.add_option("", "--print-latest", action="store_true", default=False,
+                    help="print the latest buildable revision and exit")
+  options, args = parser.parse_args()
+
+  if options.print_latest:
+    print GetRevision() or "HEAD"
+    sys.exit(0)
+
+  if not args:
+    Message("Usage: %s <path\\to\\chrome\\root> [options]" % sys.argv[0])
+    sys.exit(1)
+
+  chrome_root = args[0]
+
+  if not options.nosync:
+    rv = DoUpdate(chrome_root)
+    if rv != 0:
+      Message("Update Failed.  Bailing.")
+      sys.exit(rv)
+
+  chrome_sln = FixupPath(options.solution)
+  rv = DoBuild(chrome_root, chrome_sln, options.clean, "Debug")
+  if rv != 0:
+    Message("Debug build failed.  Sad face :(")
+
+  if options.release:
+    rv = DoBuild(chrome_root, chrome_sln, options.clean, "Release")
+    if rv != 0:
+      Message("Release build failed.  Sad face :(")
+
+  if rv != 0:
+    sys.exit(rv)
+
+  Message("Success!")
+
+if __name__ == "__main__":
+  Main()

+ 2725 - 0
cpplint.py

@@ -0,0 +1,2725 @@
+#!/usr/bin/python2.4
+#
+# cpplint.py is Copyright (C) 2009 Google Inc.
+#
+# It is free software; you can redistribute it and/or modify it under the
+# terms of either:
+#
+# a) the GNU General Public License as published by the Free Software
+# Foundation; either version 1, or (at your option) any later version, or
+#
+# b) the "Artistic License".
+
+# Here are some issues that I've had people identify in my code during reviews,
+# that I think are possible to flag automatically in a lint tool.  If these were
+# caught by lint, it would save time both for me and for my reviewers.
+# Most likely, some of these are beyond the scope of the current lint framework,
+# but I think it is valuable to retain these wish-list items even if they cannot
+# be immediately implemented.
+#
+#  Suggestions
+#  -----------
+#  - Check for no 'explicit' for multi-arg ctor
+#  - Check for boolean assign RHS in parens
+#  - Check for ctor initializer-list colon position and spacing
+#  - Check that if there's a ctor, there should be a dtor
+#  - Check accessors that return non-pointer member variables are
+#    declared const
+#  - Check accessors that return non-const pointer member vars are
+#    *not* declared const
+#  - Check for using public includes for testing
+#  - Check for spaces between brackets in one-line inline method
+#  - Check for no assert()
+#  - Check for spaces surrounding operators
+#  - Check for 0 in pointer context (should be NULL)
+#  - Check for 0 in char context (should be '\0')
+#  - Check for camel-case method name conventions for methods
+#    that are not simple inline getters and setters
+#  - Check that base classes have virtual destructors
+#    put "  // namespace" after } that closes a namespace, with
+#    namespace's name after 'namespace' if it is named.
+#  - Do not indent namespace contents
+#  - Avoid inlining non-trivial constructors in header files
+#    include base/basictypes.h if DISALLOW_EVIL_CONSTRUCTORS is used
+#  - Check for old-school (void) cast for call-sites of functions
+#    ignored return value
+#  - Check gUnit usage of anonymous namespace
+#  - Check for class declaration order (typedefs, consts, enums,
+#    ctor(s?), dtor, friend declarations, methods, member vars)
+#
+
+"""Does google-lint on c++ files.
+
+The goal of this script is to identify places in the code that *may*
+be in non-compliance with google style.  It does not attempt to fix
+up these problems -- the point is to educate.  It also does not
+attempt to find all problems, or to ensure that everything it does
+find is legitimately a problem.
+
+In particular, we can get very confused by /* and // inside strings!
+We do a small hack, which is to ignore //'s with "'s after them on the
+same line, but it is far from perfect (in either direction).
+"""
+
+import codecs
+import getopt
+import math  # for log
+import os
+import re
+import sre_compile
+import string
+import sys
+import unicodedata
+
+
+_USAGE = """
+Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
+        <file> [file] ...
+
+  The style guidelines this tries to follow are those in
+    http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
+
+  Every problem is given a confidence score from 1-5, with 5 meaning we are
+  certain of the problem, and 1 meaning it could be a legitimate construct.
+  This will miss some errors, and is not a substitute for a code review.
+
+  To prevent specific lines from being linted, add a '// NOLINT' comment to the
+  end of the line.
+
+  The files passed in will be linted; at least one file must be provided.
+  Linted extensions are .cc, .cpp, and .h.  Other file types will be ignored.
+
+  Flags:
+
+    output=vs7
+      By default, the output is formatted to ease emacs parsing.  Visual Studio
+      compatible output (vs7) may also be used.  Other formats are unsupported.
+
+    verbose=#
+      Specify a number 0-5 to restrict errors to certain verbosity levels.
+
+    filter=-x,+y,...
+      Specify a comma-separated list of category-filters to apply: only
+      error messages whose category names pass the filters will be printed.
+      (Category names are printed with the message and look like
+      "[whitespace/indent]".)  Filters are evaluated left to right.
+      "-FOO" and "FOO" means "do not print categories that start with FOO".
+      "+FOO" means "do print categories that start with FOO".
+
+      Examples: --filter=-whitespace,+whitespace/braces
+                --filter=whitespace,runtime/printf,+runtime/printf_format
+                --filter=-,+build/include_what_you_use
+
+      To see a list of all the categories used in cpplint, pass no arg:
+         --filter=
+"""
+
+# We categorize each error message we print.  Here are the categories.
+# We want an explicit list so we can list them all in cpplint --filter=.
+# If you add a new error message with a new category, add it to the list
+# here!  cpplint_unittest.py should tell you if you forget to do this.
+_ERROR_CATEGORIES = """\
+  build/class
+  build/deprecated
+  build/endif_comment
+  build/forward_decl
+  build/header_guard
+  build/include
+  build/include_order
+  build/include_what_you_use
+  build/namespaces
+  build/printf_format
+  build/storage_class
+  legal/copyright
+  readability/braces
+  readability/casting
+  readability/check
+  readability/constructors
+  readability/fn_size
+  readability/function
+  readability/multiline_comment
+  readability/multiline_string
+  readability/streams
+  readability/todo
+  readability/utf8
+  runtime/arrays
+  runtime/casting
+  runtime/explicit
+  runtime/int
+  runtime/init
+  runtime/memset
+  runtime/printf
+  runtime/printf_format
+  runtime/references
+  runtime/rtti
+  runtime/sizeof
+  runtime/string
+  runtime/threadsafe_fn
+  runtime/virtual
+  whitespace/blank_line
+  whitespace/braces
+  whitespace/comma
+  whitespace/comments
+  whitespace/end_of_line
+  whitespace/ending_newline
+  whitespace/indent
+  whitespace/labels
+  whitespace/line_length
+  whitespace/newline
+  whitespace/operators
+  whitespace/parens
+  whitespace/semicolon
+  whitespace/tab
+  whitespace/todo
+"""
+
+# We used to check for high-bit characters, but after much discussion we
+# decided those were OK, as long as they were in UTF-8 and didn't represent
+# hard-coded international strings, which belong in a separate i18n file.
+
+# Headers that we consider STL headers.
+_STL_HEADERS = frozenset([
+    'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
+    'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
+    'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'pair.h',
+    'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
+    'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
+    'utility', 'vector', 'vector.h',
+    ])
+
+
+# Non-STL C++ system headers.
+_CPP_HEADERS = frozenset([
+    'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
+    'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
+    'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
+    'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
+    'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
+    'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
+    'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream.h',
+    'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
+    'numeric', 'ostream.h', 'parsestream.h', 'pfstream.h', 'PlotFile.h',
+    'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h',
+    'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
+    'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
+    'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
+    ])
+
+
+# Assertion macros.  These are defined in base/logging.h and
+# testing/base/gunit.h.  Note that the _M versions need to come first
+# for substring matching to work.
+_CHECK_MACROS = [
+    'CHECK',
+    'EXPECT_TRUE_M', 'EXPECT_TRUE',
+    'ASSERT_TRUE_M', 'ASSERT_TRUE',
+    'EXPECT_FALSE_M', 'EXPECT_FALSE',
+    'ASSERT_FALSE_M', 'ASSERT_FALSE',
+    ]
+
+# Replacement macros for CHECK/EXPECT_TRUE/EXPECT_FALSE
+_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
+
+for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
+                        ('>=', 'GE'), ('>', 'GT'),
+                        ('<=', 'LE'), ('<', 'LT')]:
+  _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
+  _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
+  _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
+  _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
+  _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
+
+for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
+                            ('>=', 'LT'), ('>', 'LE'),
+                            ('<=', 'GT'), ('<', 'GE')]:
+  _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
+  _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
+  _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
+  _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
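+
+# After the two loops above, the table reads as follows (an illustrative
+# sketch, not part of the original file):
+#   _CHECK_REPLACEMENT['CHECK']['==']        # => 'CHECK_EQ'
+#   _CHECK_REPLACEMENT['EXPECT_FALSE']['<']  # => 'EXPECT_GE'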
+
+
+# These constants define types of headers for use with
+# _IncludeState.CheckNextIncludeOrder().
+_C_SYS_HEADER = 1
+_CPP_SYS_HEADER = 2
+_LIKELY_MY_HEADER = 3
+_POSSIBLE_MY_HEADER = 4
+_OTHER_HEADER = 5
+
+
+_regexp_compile_cache = {}
+
+
+def Match(pattern, s):
+  """Matches the string with the pattern, caching the compiled regexp."""
+  # The regexp compilation caching is inlined in both Match and Search for
+  # performance reasons; factoring it out into a separate function turns out
+  # to be noticeably expensive.
+  if pattern not in _regexp_compile_cache:
+    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+  return _regexp_compile_cache[pattern].match(s)
+
+
+def Search(pattern, s):
+  """Searches the string for the pattern, caching the compiled regexp."""
+  if pattern not in _regexp_compile_cache:
+    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+  return _regexp_compile_cache[pattern].search(s)
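+
+# Illustrative behavior of the two helpers (assumed, not in the original):
+#   Match(r'\d+', 'abc123')              # => None; match() anchors at start
+#   Search(r'\d+', 'abc123').group(0)    # => '123'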
+
+
+class _IncludeState(dict):
+  """Tracks line numbers for includes, and the order in which includes appear.
+
+  As a dict, an _IncludeState object serves as a mapping between include
+  filename and line number on which that file was included.
+
+  Call CheckNextIncludeOrder() once for each header in the file, passing
+  in the type constants defined above. Calls in an illegal order will
+  raise an _IncludeError with an appropriate error message.
+
+  """
+  # self._section will move monotonically through this set. If it ever
+  # needs to move backwards, CheckNextIncludeOrder will raise an error.
+  _INITIAL_SECTION = 0
+  _MY_H_SECTION = 1
+  _C_SECTION = 2
+  _CPP_SECTION = 3
+  _OTHER_H_SECTION = 4
+
+  _TYPE_NAMES = {
+      _C_SYS_HEADER: 'C system header',
+      _CPP_SYS_HEADER: 'C++ system header',
+      _LIKELY_MY_HEADER: 'header this file implements',
+      _POSSIBLE_MY_HEADER: 'header this file may implement',
+      _OTHER_HEADER: 'other header',
+      }
+  _SECTION_NAMES = {
+      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
+      _MY_H_SECTION: 'a header this file implements',
+      _C_SECTION: 'C system header',
+      _CPP_SECTION: 'C++ system header',
+      _OTHER_H_SECTION: 'other header',
+      }
+
+  def __init__(self):
+    dict.__init__(self)
+    self._section = self._INITIAL_SECTION
+
+  def CheckNextIncludeOrder(self, header_type):
+    """Returns a non-empty error message if the next header is out of order.
+
+    This function also updates the internal state to be ready to check
+    the next include.
+
+    Args:
+      header_type: One of the _XXX_HEADER constants defined above.
+
+    Returns:
+      The empty string if the header is in the right order, or an
+      error message describing what's wrong.
+
+    """
+    error_message = ('Found %s after %s' %
+                     (self._TYPE_NAMES[header_type],
+                      self._SECTION_NAMES[self._section]))
+
+    if header_type == _C_SYS_HEADER:
+      if self._section <= self._C_SECTION:
+        self._section = self._C_SECTION
+      else:
+        return error_message
+    elif header_type == _CPP_SYS_HEADER:
+      if self._section <= self._CPP_SECTION:
+        self._section = self._CPP_SECTION
+      else:
+        return error_message
+    elif header_type == _LIKELY_MY_HEADER:
+      if self._section <= self._MY_H_SECTION:
+        self._section = self._MY_H_SECTION
+      else:
+        self._section = self._OTHER_H_SECTION
+    elif header_type == _POSSIBLE_MY_HEADER:
+      if self._section <= self._MY_H_SECTION:
+        self._section = self._MY_H_SECTION
+      else:
+        # This will always be the fallback because we're not sure
+        # enough that the header is associated with this file.
+        self._section = self._OTHER_H_SECTION
+    else:
+      assert header_type == _OTHER_HEADER
+      self._section = self._OTHER_H_SECTION
+
+    return ''
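+
+# A minimal usage sketch (hypothetical, not part of the original file):
+#   state = _IncludeState()
+#   state.CheckNextIncludeOrder(_C_SYS_HEADER)    # => ''
+#   state.CheckNextIncludeOrder(_CPP_SYS_HEADER)  # => ''
+#   state.CheckNextIncludeOrder(_C_SYS_HEADER)
+#   # => 'Found C system header after C++ system header'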
+
+
+class _CppLintState(object):
+  """Maintains module-wide state.."""
+
+  def __init__(self):
+    self.verbose_level = 1  # global setting.
+    self.error_count = 0    # global count of reported errors
+    self.filters = []       # filters to apply when emitting error messages
+
+    # output format:
+    # "emacs" - format that emacs can parse (default)
+    # "vs7" - format that Microsoft Visual Studio 7 can parse
+    self.output_format = 'emacs'
+
+  def SetOutputFormat(self, output_format):
+    """Sets the output format for errors."""
+    self.output_format = output_format
+
+  def SetVerboseLevel(self, level):
+    """Sets the module's verbosity, and returns the previous setting."""
+    last_verbose_level = self.verbose_level
+    self.verbose_level = level
+    return last_verbose_level
+
+  def SetFilters(self, filters):
+    """Sets the error-message filters.
+
+    These filters are applied when deciding whether to emit a given
+    error message.
+
+    Args:
+      filters: A string of comma-separated filters (eg "+whitespace/indent").
+               Each filter should start with + or -; else we die.
+    """
+    if not filters:
+      self.filters = []
+    else:
+      self.filters = filters.split(',')
+    for filt in self.filters:
+      if not (filt.startswith('+') or filt.startswith('-')):
+        raise ValueError('Every filter in --filters must start with + or -'
+                         ' (%s does not)' % filt)
+
+  def ResetErrorCount(self):
+    """Sets the module's error statistic back to zero."""
+    self.error_count = 0
+
+  def IncrementErrorCount(self):
+    """Bumps the module's error statistic."""
+    self.error_count += 1
+
+
+_cpplint_state = _CppLintState()
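+
+# e.g. (an illustrative sketch, not in the original file):
+#   _cpplint_state.SetFilters('-whitespace,+whitespace/braces')
+#   _cpplint_state.filters   # => ['-whitespace', '+whitespace/braces']
+#   _cpplint_state.SetFilters('whitespace')   # raises ValueError: no +/-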
+
+
+def _OutputFormat():
+  """Gets the module's output format."""
+  return _cpplint_state.output_format
+
+
+def _SetOutputFormat(output_format):
+  """Sets the module's output format."""
+  _cpplint_state.SetOutputFormat(output_format)
+
+
+def _VerboseLevel():
+  """Returns the module's verbosity setting."""
+  return _cpplint_state.verbose_level
+
+
+def _SetVerboseLevel(level):
+  """Sets the module's verbosity, and returns the previous setting."""
+  return _cpplint_state.SetVerboseLevel(level)
+
+
+def _Filters():
+  """Returns the module's list of output filters, as a list."""
+  return _cpplint_state.filters
+
+
+def _SetFilters(filters):
+  """Sets the module's error-message filters.
+
+  These filters are applied when deciding whether to emit a given
+  error message.
+
+  Args:
+    filters: A string of comma-separated filters (eg "whitespace/indent").
+             Each filter should start with + or -; else we die.
+  """
+  _cpplint_state.SetFilters(filters)
+
+
+class _FunctionState(object):
+  """Tracks current function name and the number of lines in its body."""
+
+  _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
+  _TEST_TRIGGER = 400    # about 50% more than _NORMAL_TRIGGER.
+
+  def __init__(self):
+    self.in_a_function = False
+    self.lines_in_function = 0
+    self.current_function = ''
+
+  def Begin(self, function_name):
+    """Start analyzing function body.
+
+    Args:
+      function_name: The name of the function being tracked.
+    """
+    self.in_a_function = True
+    self.lines_in_function = 0
+    self.current_function = function_name
+
+  def Count(self):
+    """Count line in current function body."""
+    if self.in_a_function:
+      self.lines_in_function += 1
+
+  def Check(self, error, filename, linenum):
+    """Report if too many lines in function body.
+
+    Args:
+      error: The function to call with any errors found.
+      filename: The name of the current file.
+      linenum: The number of the line to check.
+    """
+    if Match(r'T(EST|est)', self.current_function):
+      base_trigger = self._TEST_TRIGGER
+    else:
+      base_trigger = self._NORMAL_TRIGGER
+    trigger = base_trigger * 2**_VerboseLevel()
+
+    if self.lines_in_function > trigger:
+      error_level = int(math.log(self.lines_in_function / base_trigger, 2))
+      # 250 => 0, 500 => 1, 1000 => 2, 2000 => 3, ... (non-test trigger)
+      if error_level > 5:
+        error_level = 5
+      error(filename, linenum, 'readability/fn_size', error_level,
+            'Small and focused functions are preferred:'
+            ' %s has %d non-comment lines'
+            ' (error triggered by exceeding %d lines).'  % (
+                self.current_function, self.lines_in_function, trigger))
+
+  def End(self):
+    """Stop analizing function body."""
+    self.in_a_function = False
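+
+# Worked example (assumed numbers): at --v=0 the trigger for an ordinary
+# function is 250 * 2**0 = 250 non-comment lines (400 for TEST functions);
+# each extra verbosity level doubles it, so --v=2 allows 1000 lines.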
+
+
+class _IncludeError(Exception):
+  """Indicates a problem with the include order in a file."""
+  pass
+
+
+class FileInfo:
+  """Provides utility functions for filenames.
+
+  FileInfo provides easy access to the components of a file's path
+  relative to the project root.
+  """
+
+  def __init__(self, filename):
+    self._filename = filename
+
+  def FullName(self):
+    """Make Windows paths like Unix."""
+    return os.path.abspath(self._filename).replace('\\', '/')
+
+  def RepositoryName(self):
+    """FullName after removing the local path to the repository.
+
+    If we have a real absolute path name here we can try to do something smart:
+    detecting the root of the checkout and truncating /path/to/checkout from
+    the name so that we get header guards that don't include things like
+    "C:\Documents and Settings\..." or "/home/username/..." in them and thus
+    people on different computers who have checked the source out to different
+    locations won't see bogus errors.
+    """
+    fullname = self.FullName()
+
+    if os.path.exists(fullname):
+      project_dir = os.path.dirname(fullname)
+
+      if os.path.exists(os.path.join(project_dir, ".svn")):
+        # If there's a .svn file in the current directory, we recursively look
+        # up the directory tree for the top of the SVN checkout
+        root_dir = project_dir
+        one_up_dir = os.path.dirname(root_dir)
+        while os.path.exists(os.path.join(one_up_dir, ".svn")):
+          root_dir = os.path.dirname(root_dir)
+          one_up_dir = os.path.dirname(one_up_dir)
+
+        prefix = os.path.commonprefix([root_dir, project_dir])
+        return fullname[len(prefix) + 1:]
+
+      # Not SVN? Try to find a git top level directory by searching up from the
+      # current path.
+      root_dir = os.path.dirname(fullname)
+      while (root_dir != os.path.dirname(root_dir) and
+             not os.path.exists(os.path.join(root_dir, ".git"))):
+        root_dir = os.path.dirname(root_dir)
+      if os.path.exists(os.path.join(root_dir, ".git")):
+        prefix = os.path.commonprefix([root_dir, project_dir])
+        return fullname[len(prefix) + 1:]
+
+    # Don't know what to do; header guard warnings may be wrong...
+    return fullname
+
+  def Split(self):
+    """Splits the file into the directory, basename, and extension.
+
+    For 'chrome/browser/browser.cc', Split() would
+    return ('chrome/browser', 'browser', '.cc')
+
+    Returns:
+      A tuple of (directory, basename, extension).
+    """
+
+    googlename = self.RepositoryName()
+    project, rest = os.path.split(googlename)
+    return (project,) + os.path.splitext(rest)
+
+  def BaseName(self):
+    """File base name - text after the final slash, before the final period."""
+    return self.Split()[1]
+
+  def Extension(self):
+    """File extension - text following the final period."""
+    return self.Split()[2]
+
+  def NoExtension(self):
+    """File has no source file extension."""
+    return '/'.join(self.Split()[0:2])
+
+  def IsSource(self):
+    """File has a source file extension."""
+    return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
+
+
+def _ShouldPrintError(category, confidence):
+  """Returns true iff confidence >= verbose, and category passes filter."""
+  # There are two ways we might decide not to print an error message:
+  # the verbosity level isn't high enough, or the filters filter it out.
+  if confidence < _cpplint_state.verbose_level:
+    return False
+
+  is_filtered = False
+  for one_filter in _Filters():
+    if one_filter.startswith('-'):
+      if category.startswith(one_filter[1:]):
+        is_filtered = True
+    elif one_filter.startswith('+'):
+      if category.startswith(one_filter[1:]):
+        is_filtered = False
+    else:
+      assert False  # should have been checked for in SetFilters.
+  if is_filtered:
+    return False
+
+  return True
+
+
+def Error(filename, linenum, category, confidence, message):
+  """Logs the fact we've found a lint error.
+
+  We log where the error was found, and also our confidence in the error,
+  that is, how certain we are this is a legitimate style regression, and
+  not a misidentification or a use that's sometimes justified.
+
+  Args:
+    filename: The name of the file containing the error.
+    linenum: The number of the line containing the error.
+    category: A string used to describe the "category" this bug
+      falls under: "whitespace", say, or "runtime".  Categories
+      may have a hierarchy separated by slashes: "whitespace/indent".
+    confidence: A number from 1-5 representing a confidence score for
+      the error, with 5 meaning that we are certain of the problem,
+      and 1 meaning that it could be a legitimate construct.
+    message: The error message.
+  """
+  # There are two ways we might decide not to print an error message:
+  # the verbosity level isn't high enough, or the filters filter it out.
+  if _ShouldPrintError(category, confidence):
+    _cpplint_state.IncrementErrorCount()
+    if _cpplint_state.output_format == 'vs7':
+      sys.stderr.write('%s(%s):  %s  [%s] [%d]\n' % (
+          filename, linenum, message, category, confidence))
+    else:
+      sys.stderr.write('%s:%s:  %s  [%s] [%d]\n' % (
+          filename, linenum, message, category, confidence))
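+
+# Sample emitted lines (illustrative only):
+#   emacs format:  foo.cc:42:  Missing space after ,  [whitespace/comma] [3]
+#   vs7 format:    foo.cc(42):  Missing space after ,  [whitespace/comma] [3]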
+
+
+# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
+_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
+    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
+# Matches strings.  Escape codes should already be removed by ESCAPES.
+_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
+# Matches characters.  Escape codes should already be removed by ESCAPES.
+_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
+# Matches multi-line C++ comments.
+# This RE is a little bit more complicated than one might expect, because we
+# have to take care when removing spaces so we can handle comments inside
+# statements better.
+# The current rule is: We only clear spaces from both sides when we're at the
+# end of the line.  Otherwise, we try to remove spaces from the right side;
+# if this doesn't work we try the left side, but only if there's a non-word
+# character on the right.
+_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
+    r"""(\s*/\*.*\*/\s*$|
+            /\*.*\*/\s+|
+         \s+/\*.*\*/(?=\W)|
+            /\*.*\*/)""", re.VERBOSE)
+
+
+def IsCppString(line):
+  """Does line terminate so, that the next symbol is in string constant.
+
+  This function does not consider single-line nor multi-line comments.
+
+  Args:
+    line: A partial line of code, starting from position 0.
+
+  Returns:
+    True if the next character appended to 'line' is inside a
+    string constant.
+  """
+
+  line = line.replace(r'\\', 'XX')  # after this, \\" does not match to \"
+  return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
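+
+# e.g. (illustrative, assumed behavior):
+#   IsCppString('int a = "fo')      # => True: an unterminated string
+#   IsCppString('"foo" + "bar"')    # => False: quotes are balanced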
+
+
+def FindNextMultiLineCommentStart(lines, lineix):
+  """Find the beginning marker for a multiline comment."""
+  while lineix < len(lines):
+    if lines[lineix].strip().startswith('/*'):
+      # Only return this marker if the comment goes beyond this line
+      if lines[lineix].strip().find('*/', 2) < 0:
+        return lineix
+    lineix += 1
+  return len(lines)
+
+
+def FindNextMultiLineCommentEnd(lines, lineix):
+  """We are inside a comment, find the end marker."""
+  while lineix < len(lines):
+    if lines[lineix].strip().endswith('*/'):
+      return lineix
+    lineix += 1
+  return len(lines)
+
+
+def RemoveMultiLineCommentsFromRange(lines, begin, end):
+  """Clears a range of lines for multi-line comments."""
+  # Having // dummy comments makes the lines non-empty, so we will not get
+  # unnecessary blank line warnings later in the code.
+  for i in range(begin, end):
+    lines[i] = '// dummy'
+
+
+def RemoveMultiLineComments(filename, lines, error):
+  """Removes multiline (c-style) comments from lines."""
+  lineix = 0
+  while lineix < len(lines):
+    lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
+    if lineix_begin >= len(lines):
+      return
+    lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
+    if lineix_end >= len(lines):
+      error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
+            'Could not find end of multi-line comment')
+      return
+    RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
+    lineix = lineix_end + 1
+
+
+def CleanseComments(line):
+  """Removes //-comments and single-line C-style /* */ comments.
+
+  Args:
+    line: A line of C++ source.
+
+  Returns:
+    The line with single-line comments removed.
+  """
+  commentpos = line.find('//')
+  if commentpos != -1 and not IsCppString(line[:commentpos]):
+    line = line[:commentpos]
+  # get rid of /* ... */
+  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
+
+
+class CleansedLines:
+  """Holds 3 copies of all lines with different preprocessing applied to them.
+
+  1) elided member contains lines without strings and comments,
+  2) lines member contains lines without comments, and
+  3) raw member contains all the lines without processing.
+  All these three members are of <type 'list'>, and of the same length.
+  """
+
+  def __init__(self, lines):
+    self.elided = []
+    self.lines = []
+    self.raw_lines = lines
+    self.num_lines = len(lines)
+    for linenum in range(len(lines)):
+      self.lines.append(CleanseComments(lines[linenum]))
+      elided = self._CollapseStrings(lines[linenum])
+      self.elided.append(CleanseComments(elided))
+
+  def NumLines(self):
+    """Returns the number of lines represented."""
+    return self.num_lines
+
+  @staticmethod
+  def _CollapseStrings(elided):
+    """Collapses strings and chars on a line to simple "" or '' blocks.
+
+    We nix strings first so we're not fooled by text like '"http://"'
+
+    Args:
+      elided: The line being processed.
+
+    Returns:
+      The line with collapsed strings.
+    """
+    if not _RE_PATTERN_INCLUDE.match(elided):
+      # Remove escaped characters first to make quote/single quote collapsing
+      # basic.  Things that look like escaped characters shouldn't occur
+      # outside of strings and chars.
+      elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
+      elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
+      elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
+    return elided
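+
+# e.g. (sketch, not in the original file): _CollapseStrings on
+#   printf("%d", 'x');
+# yields
+#   printf("", '');
+# so later checks are not fooled by operators or parens inside literals.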
+
+
+def CloseExpression(clean_lines, linenum, pos):
+  """If input points to ( or { or [, finds the position that closes it.
+
+  If lines[linenum][pos] points to a '(' or '{' or '[', finds the
+  linenum/pos that correspond to the closing of the expression.
+
+  Args:
+    clean_lines: A CleansedLines instance containing the file.
+    linenum: The number of the line to check.
+    pos: A position on the line.
+
+  Returns:
+    A tuple (line, linenum, pos) pointer *past* the closing brace, or
+    (line, len(lines), -1) if we never find a close.  Note we ignore
+    strings and comments when matching; and the line we return is the
+    'cleansed' line at linenum.
+  """
+
+  line = clean_lines.elided[linenum]
+  startchar = line[pos]
+  if startchar not in '({[':
+    return (line, clean_lines.NumLines(), -1)
+  if startchar == '(': endchar = ')'
+  if startchar == '[': endchar = ']'
+  if startchar == '{': endchar = '}'
+
+  num_open = line.count(startchar) - line.count(endchar)
+  while linenum < clean_lines.NumLines() and num_open > 0:
+    linenum += 1
+    line = clean_lines.elided[linenum]
+    num_open += line.count(startchar) - line.count(endchar)
+  # OK, now find the endchar that actually got us back to even
+  endpos = len(line)
+  while num_open >= 0:
+    endpos = line.rfind(endchar, 0, endpos)
+    num_open -= 1                 # chopped off another closing char
+  return (line, linenum, endpos + 1)
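+
+# Usage sketch (hypothetical input): with cleansed lines
+#   ['f(a, (b + c),', '  d);']
+# CloseExpression(clean_lines, 0, 1) returns ('  d);', 1, 4) -- the
+# position just past the ')' that closes the '(' opened on line 0.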
+
+
+def CheckForCopyright(filename, lines, error):
+  """Logs an error if no Copyright message appears at the top of the file."""
+
+  # We'll say it should occur by line 10. Don't forget there's a
+  # dummy line at the front.
+  for line in xrange(1, min(len(lines), 11)):
+    if re.search(r'Copyright', lines[line], re.I): break
+  else:                       # means no copyright line was found
+    error(filename, 0, 'legal/copyright', 5,
+          'No copyright message found.  '
+          'You should have a line: "Copyright [year] <Copyright Owner>"')
+
+
+def GetHeaderGuardCPPVariable(filename):
+  """Returns the CPP variable that should be used as a header guard.
+
+  Args:
+    filename: The name of a C++ header file.
+
+  Returns:
+    The CPP variable that should be used as a header guard in the
+    named file.
+
+  """
+
+  fileinfo = FileInfo(filename)
+  return re.sub(r'[-./\s]', '_', fileinfo.RepositoryName()).upper() + '_'
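+
+# e.g. (illustrative): for a header at chrome/browser/browser.h relative to
+# the checkout root, the expected guard is CHROME_BROWSER_BROWSER_H_.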
+
+
+def CheckForHeaderGuard(filename, lines, error):
+  """Checks that the file contains a header guard.
+
+  Logs an error if no #ifndef header guard is present.  For google3
+  headers, checks that the full pathname is used.
+
+  Args:
+    filename: The name of the C++ header file.
+    lines: An array of strings, each representing a line of the file.
+    error: The function to call with any errors found.
+  """
+
+  cppvar = GetHeaderGuardCPPVariable(filename)
+
+  ifndef = None
+  ifndef_linenum = 0
+  define = None
+  endif = None
+  endif_linenum = 0
+  for linenum, line in enumerate(lines):
+    linesplit = line.split()
+    if len(linesplit) >= 2:
+      # find the first occurrence of #ifndef and #define, save arg
+      if not ifndef and linesplit[0] == '#ifndef':
+        # set ifndef to the header guard presented on the #ifndef line.
+        ifndef = linesplit[1]
+        ifndef_linenum = linenum
+      if not define and linesplit[0] == '#define':
+        define = linesplit[1]
+    # find the last occurrence of #endif, save entire line
+    if line.startswith('#endif'):
+      endif = line
+      endif_linenum = linenum
+
+  if not ifndef or not define or ifndef != define:
+    error(filename, 0, 'build/header_guard', 5,
+          'No #ifndef header guard found, suggested CPP variable is: %s' %
+          cppvar)
+    return
+
+  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
+  # for backward compatibility.
+  if ifndef != cppvar:
+    error_level = 0
+    if ifndef != cppvar + '_':
+      error_level = 5
+
+    error(filename, ifndef_linenum, 'build/header_guard', error_level,
+          '#ifndef header guard has wrong style, please use: %s' % cppvar)
+
+  if endif != ('#endif  // %s' % cppvar):
+    error_level = 0
+    if endif != ('#endif  // %s' % (cppvar + '_')):
+      error_level = 5
+
+    error(filename, endif_linenum, 'build/header_guard', error_level,
+          '#endif line should be "#endif  // %s"' % cppvar)
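+
+# A conforming guard for the example above would look like (sketch):
+#   #ifndef CHROME_BROWSER_BROWSER_H_
+#   #define CHROME_BROWSER_BROWSER_H_
+#   ...
+#   #endif  // CHROME_BROWSER_BROWSER_H_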
+
+
+def CheckForUnicodeReplacementCharacters(filename, lines, error):
+  """Logs an error for each line containing Unicode replacement characters.
+
+  These indicate that either the file contained invalid UTF-8 (likely)
+  or Unicode replacement characters (which it shouldn't).  Note that
+  it's possible for this to throw off line numbering if the invalid
+  UTF-8 occurred adjacent to a newline.
+
+  Args:
+    filename: The name of the current file.
+    lines: An array of strings, each representing a line of the file.
+    error: The function to call with any errors found.
+  """
+  for linenum, line in enumerate(lines):
+    if u'\ufffd' in line:
+      error(filename, linenum, 'readability/utf8', 5,
+            'Line contains invalid UTF-8 (or Unicode replacement character).')
+
+
+def CheckForNewlineAtEOF(filename, lines, error):
+  """Logs an error if there is no newline char at the end of the file.
+
+  Args:
+    filename: The name of the current file.
+    lines: An array of strings, each representing a line of the file.
+    error: The function to call with any errors found.
+  """
+
+  # The array lines() was created by adding two newlines to the
+  # original file (go figure), then splitting on \n.
+  # To verify that the file ends in \n, we just have to make sure the
+  # last-but-two element of lines() exists and is empty.
+  if len(lines) < 3 or lines[-2]:
+    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
+          'Could not find a newline character at the end of the file.')
+
+
+def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
+  """Logs an error if we see /* ... */ or "..." that extend past one line.
+
+  /* ... */ comments are legit inside macros, for one line.
+  Otherwise, we prefer // comments, so it's ok to warn about the
+  other.  Likewise, it's ok for strings to extend across multiple
+  lines, as long as a line continuation character (backslash)
+  terminates each line. Although not currently prohibited by the C++
+  style guide, it's ugly and unnecessary. We don't do well with either
+  in this lint program, so we warn about both.
+
+  Args:
+    filename: The name of the current file.
+    clean_lines: A CleansedLines instance containing the file.
+    linenum: The number of the line to check.
+    error: The function to call with any errors found.
+  """
+  line = clean_lines.elided[linenum]
+
+  # Remove all \\ (escaped backslashes) from the line. They are OK, and the
+  # second (escaped) slash may trigger later \" detection erroneously.
+  line = line.replace('\\\\', '')
+
+  if line.count('/*') > line.count('*/'):
+    error(filename, linenum, 'readability/multiline_comment', 5,
+          'Complex multi-line /*...*/-style comment found. '
+          'Lint may give bogus warnings.  '
+          'Consider replacing these with //-style comments, '
+          'with #if 0...#endif, '
+          'or with more clearly structured multi-line comments.')
+
+  if (line.count('"') - line.count('\\"')) % 2:
+    error(filename, linenum, 'readability/multiline_string', 5,
+          'Multi-line string ("...") found.  This lint script doesn\'t '
+          'do well with such strings, and may give bogus warnings.  They\'re '
+          'ugly and unnecessary, and you should use concatenation instead.')
+
+
+threading_list = (
+    ('asctime(', 'asctime_r('),
+    ('ctime(', 'ctime_r('),
+    ('getgrgid(', 'getgrgid_r('),
+    ('getgrnam(', 'getgrnam_r('),
+    ('getlogin(', 'getlogin_r('),
+    ('getpwnam(', 'getpwnam_r('),
+    ('getpwuid(', 'getpwuid_r('),
+    ('gmtime(', 'gmtime_r('),
+    ('localtime(', 'localtime_r('),
+    ('rand(', 'rand_r('),
+    ('readdir(', 'readdir_r('),
+    ('strtok(', 'strtok_r('),
+    ('ttyname(', 'ttyname_r('),
+    )
+
+
+def CheckPosixThreading(filename, clean_lines, linenum, error):
+  """Checks for calls to thread-unsafe functions.
+
+  Much code has been originally written without consideration of
+  multi-threading. Also, engineers often rely on their old experience: they
+  learned POSIX before its threading extensions were added. These
+  tests guide engineers to use thread-safe functions (when using
+  POSIX directly).
+
+  Args:
+    filename: The name of the current file.
+    clean_lines: A CleansedLines instance containing the file.
+    linenum: The number of the line to check.
+    error: The function to call with any errors found.
+  """
+  line = clean_lines.elided[linenum]
+  for single_thread_function, multithread_safe_function in threading_list:
+    ix = line.find(single_thread_function)
+    if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
+                                line[ix - 1] not in ('_', '.', '>'))):
+      error(filename, linenum, 'runtime/threadsafe_fn', 2,
+            'Consider using ' + multithread_safe_function +
+            '...) instead of ' + single_thread_function +
+            '...) for improved thread safety.')
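+
+# e.g. (illustrative): a bare 'rand()' call draws 'Consider using rand_r(...)
+# instead of rand(...) for improved thread safety.', while 'my_rand()' and
+# 's.rand()' are skipped thanks to the preceding-character test.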
+
+
+class _ClassInfo(object):
+  """Stores information about a class."""
+
+  def __init__(self, name, linenum):
+    self.name = name
+    self.linenum = linenum
+    self.seen_open_brace = False
+    self.is_derived = False
+    self.virtual_method_linenumber = None
+    self.has_virtual_destructor = False
+    self.brace_depth = 0
+
+
+class _ClassState(object):
+  """Holds the current state of the parse relating to class declarations.
+
+  It maintains a stack of _ClassInfos representing the parser's guess
+  as to the current nesting of class declarations. The innermost class
+  is at the top (back) of the stack. Typically, the stack will either
+  be empty or have exactly one entry.
+  """
+
+  def __init__(self):
+    self.classinfo_stack = []
+
+  def CheckFinished(self, filename, error):
+    """Checks that all classes have been completely parsed.
+
+    Call this when all lines in a file have been processed.
+    Args:
+      filename: The name of the current file.
+      error: The function to call with any errors found.
+    """
+    if self.classinfo_stack:
+      # Note: This test can result in false positives if #ifdef constructs
+      # get in the way of brace matching. See the testBuildClass test in
+      # cpplint_unittest.py for an example of this.
+      error(filename, self.classinfo_stack[0].linenum, 'build/class', 5,
+            'Failed to find complete declaration of class %s' %
+            self.classinfo_stack[0].name)
+
+
+def CheckForNonStandardConstructs(filename, clean_lines, linenum,
+                                  class_state, error):
+  """Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
+
+  Complain about several constructs which gcc-2 accepts, but which are
+  not standard C++.  Warning about these in lint is one way to ease the
+  transition to new compilers.
+  - put storage class first (e.g. "static const" instead of "const static").
+  - "%lld" instead of %qd" in printf-type functions.
+  - "%1$d" is non-standard in printf-type functions.
+  - "\%" is an undefined character escape sequence.
+  - text after #endif is not allowed.
+  - invalid inner-style forward declaration.
+  - >? and <? operators, and their >?= and <?= cousins.
+  - classes with virtual methods need virtual destructors (compiler warning
+    available, but not turned on yet.)
+
+  Additionally, check for constructor/destructor style violations as it
+  is very convenient to do so while checking for gcc-2 compliance.
+
+  Args:
+    filename: The name of the current file.
+    clean_lines: A CleansedLines instance containing the file.
+    linenum: The number of the line to check.
+    class_state: A _ClassState instance which maintains information about
+                 the current stack of nested class declarations being parsed.
+    error: A callable to which errors are reported, which takes 4 arguments:
+           filename, line number, error level, and message
+  """
+
+  # Remove comments from the line, but leave in strings for now.
+  line = clean_lines.lines[linenum]
+
+  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
+    error(filename, linenum, 'runtime/printf_format', 3,
+          '%q in format strings is deprecated.  Use %ll instead.')
+
+  if Search(r'printf\s*\(.*".*%\d+\$', line):
+    error(filename, linenum, 'runtime/printf_format', 2,
+          '%N$ formats are unconventional.  Try rewriting to avoid them.')
+
+  # Remove escaped backslashes before looking for undefined escapes.
+  line = line.replace('\\\\', '')
+
+  if Search(r'("|\').*\\(%|\[|\(|{)', line):
+    error(filename, linenum, 'build/printf_format', 3,
+          '%, [, (, and { are undefined character escapes.  Unescape them.')
+
+  # For the rest, work with both comments and strings removed.
+  line = clean_lines.elided[linenum]
+
+  if Search(r'\b(const|volatile|void|char|short|int|long'
+            r'|float|double|signed|unsigned'
+            r'|schar|u?int8|u?int16|u?int32|u?int64)'
+            r'\s+(auto|register|static|extern|typedef)\b',
+            line):
+    error(filename, linenum, 'build/storage_class', 5,
+          'Storage class (static, extern, typedef, etc) should be first.')
+
+  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
+    error(filename, linenum, 'build/endif_comment', 5,
+          'Uncommented text after #endif is non-standard.  Use a comment.')
+
+  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
+    error(filename, linenum, 'build/forward_decl', 5,
+          'Inner-style forward declarations are invalid.  Remove this line.')
+
+  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
+            line):
+    error(filename, linenum, 'build/deprecated', 3,
+          '>? and <? (max and min) operators are non-standard and deprecated.')
+
+  # Track class entry and exit, and attempt to find cases within the
+  # class declaration that don't meet the C++ style
+  # guidelines. Tracking is very dependent on the code matching Google
+  # style guidelines, but it seems to perform well enough in testing
+  # to be a worthwhile addition to the checks.
+  classinfo_stack = class_state.classinfo_stack
+  # Look for a class declaration
+  class_decl_match = Match(
+      r'\s*(template\s*<[\w\s<>,:]*>\s*)?(class|struct)\s+(\w+(::\w+)*)', line)
+  if class_decl_match:
+    classinfo_stack.append(_ClassInfo(class_decl_match.group(3), linenum))
+
+  # Everything else in this function uses the top of the stack if it's
+  # not empty.
+  if not classinfo_stack:
+    return
+
+  classinfo = classinfo_stack[-1]
+
+  # If the opening brace hasn't been seen look for it and also
+  # parent class declarations.
+  if not classinfo.seen_open_brace:
+    # If the line has a ';' in it, assume it's a forward declaration or
+    # a single-line class declaration, which we won't process.
+    if line.find(';') != -1:
+      classinfo_stack.pop()
+      return
+    classinfo.seen_open_brace = (line.find('{') != -1)
+    # Look for a bare ':'
+    if Search('(^|[^:]):($|[^:])', line):
+      classinfo.is_derived = True
+    if not classinfo.seen_open_brace:
+      return  # Everything else in this function is for after open brace
+
+  # The class may have been declared with namespace or classname qualifiers.
+  # The constructor and destructor will not have those qualifiers.
+  base_classname = classinfo.name.split('::')[-1]
+
+  # Look for single-argument constructors that aren't marked explicit.
+  # Technically a valid construct, but against style.
+  args = Match(r'(?<!explicit)\s+%s\s*\(([^,()]+)\)'
+               % re.escape(base_classname),
+               line)
+  if (args and
+      args.group(1) != 'void' and
+      not Match(r'(const\s+)?%s\s*&' % re.escape(base_classname),
+                args.group(1).strip())):
+    error(filename, linenum, 'runtime/explicit', 5,
+          'Single-argument constructors should be marked explicit.')
+
+  # Look for methods declared virtual.
+  if Search(r'\bvirtual\b', line):
+    classinfo.virtual_method_linenumber = linenum
+    # Only look for a destructor declaration on the same line. It would
+    # be extremely unlikely for the destructor declaration to occupy
+    # more than one line.
+    if Search(r'~%s\s*\(' % base_classname, line):
+      classinfo.has_virtual_destructor = True
+
+  # Look for class end.
+  brace_depth = classinfo.brace_depth
+  brace_depth = brace_depth + line.count('{') - line.count('}')
+  if brace_depth <= 0:
+    classinfo = classinfo_stack.pop()
+    # Try to detect missing virtual destructor declarations.
+    # For now, only warn if a non-derived class with virtual methods lacks
+    # a virtual destructor. This is to make it less likely that people will
+    # declare derived virtual destructors without declaring the base
+    # destructor virtual.
+    if ((classinfo.virtual_method_linenumber is not None) and
+        (not classinfo.has_virtual_destructor) and
+        (not classinfo.is_derived)):  # Only warn for base classes
+      error(filename, classinfo.linenum, 'runtime/virtual', 4,
+            'The class %s probably needs a virtual destructor due to '
+            'having virtual method(s), one declared at line %d.'
+            % (classinfo.name, classinfo.virtual_method_linenumber))
+  else:
+    classinfo.brace_depth = brace_depth
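+
+# Illustrative trips (sketch, not in the original file):
+#   const static int kFoo = 1;   // draws build/storage_class
+#   printf("%qd", x);            // draws runtime/printf_format
+#   #endif FOO_H_                // draws build/endif_comment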
+
+
+def CheckSpacingForFunctionCall(filename, line, linenum, error):
+  """Checks for the correctness of various spacing around function calls.
+
+  Args:
+    filename: The name of the current file.
+    line: The text of the line to check.
+    linenum: The number of the line to check.
+    error: The function to call with any errors found.
+  """
+
+  # Since function calls often occur inside if/for/while/switch
+  # expressions - which have their own, more liberal conventions - we
+  # first see if we should be looking inside such an expression for a
+  # function call, to which we can apply more strict standards.
+  fncall = line    # if there's no control flow construct, look at whole line
+  for pattern in (r'\bif\s*\((.*)\)\s*{',
+                  r'\bfor\s*\((.*)\)\s*{',
+                  r'\bwhile\s*\((.*)\)\s*[{;]',
+                  r'\bswitch\s*\((.*)\)\s*{'):
+    match = Search(pattern, line)
+    if match:
+      fncall = match.group(1)    # look inside the parens for function calls
+      break
+
+  # Except in if/for/while/switch, there should never be space
+  # immediately inside parens (eg "f( 3, 4 )").  We make an exception
+  # for nested parens ( (a+b) + c ).  Likewise, there should never be
+  # a space before a ( when it's a function argument.  I assume it's a
+  # function argument when the char before the whitespace is legal in
+  # a function name (alnum + _) and we're not starting a macro. Also ignore
+  # pointers and references to arrays and functions coz they're too tricky:
+  # we use a very simple way to recognize these:
+  # " (something)(maybe-something)" or
+  # " (something)(maybe-something," or
+  # " (something)[something]"
+  # Note that we assume the contents of [] to be short enough that
+  # they'll never need to wrap.
+  if (  # Ignore control structures.
+      not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and
+      # Ignore pointers/references to functions.
+      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
+      # Ignore pointers/references to arrays.
+      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
+    if Search(r'\w\s*\(\s', fncall):      # a ( used for a fn call
+      error(filename, linenum, 'whitespace/parens', 4,
+            'Extra space after ( in function call')
+    elif Search(r'\(\s+[^(]', fncall):
+      error(filename, linenum, 'whitespace/parens', 2,
+            'Extra space after (')
+    if (Search(r'\w\s+\(', fncall) and
+        not Search(r'#\s*define|typedef', fncall)):
+      error(filename, linenum, 'whitespace/parens', 4,
+            'Extra space before ( in function call')
+    # If the ) is followed only by a newline or a { + newline, assume it's
+    # part of a control statement (if/while/etc), and don't complain
+    if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
+      error(filename, linenum, 'whitespace/parens', 2,
+            'Extra space before )')
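+
+# e.g. (illustrative): 'f( 3, 4 );' draws both 'Extra space after ( in
+# function call' and 'Extra space before )', while the space in 'if (foo)'
+# is exempt because control-flow keywords are skipped.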
+
+
+def IsBlankLine(line):
+  """Returns true if the given line is blank.
+
+  We consider a line to be blank if the line is empty or consists of
+  only white spaces.
+
+  Args:
+    line: A line of a string.
+
+  Returns:
+    True, if the given line is blank.
+  """
+  return not line or line.isspace()
+
+
+def CheckForFunctionLengths(filename, clean_lines, linenum,
+                            function_state, error):
+  """Reports for long function bodies.
+
+  For an overview why this is done, see:
+  http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
+
+  Uses a simplistic algorithm assuming other style guidelines
+  (especially spacing) are followed.
+  Only checks unindented functions, so class members are unchecked.
+  Trivial bodies are unchecked, so constructors with huge initializer lists
+  may be missed.
+  Blank/comment lines are not counted so as to avoid encouraging the removal
+  of vertical space and comments just to get through a lint check.
+  NOLINT *on the last line of a function* disables this check.
+
+  Args:
+    filename: The name of the current file.
+    clean_lines: A CleansedLines instance containing the file.
+    linenum: The number of the line to check.
+    function_state: Current function name and lines in body so far.
+    error: The function to call with any errors found.
+  """
+  lines = clean_lines.lines
+  line = lines[linenum]
+  raw = clean_lines.raw_lines
+  raw_line = raw[linenum]
+  joined_line = ''
+
+  starting_func = False
+  regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
+  match_result = Match(regexp, line)
+  if match_result:
+    # If the name is all caps and underscores, figure it's a macro and
+    # ignore it, unless it's TEST or TEST_F.
+    function_name = match_result.group(1).split()[-1]
+    if function_name == 'TEST' or function_name == 'TEST_F' or (
+        not Match(r'[A-Z_]+$', function_name)):
+      starting_func = True
+
+  if starting_func:
+    body_found = False
+    # Don't look too far for the function body. Lint might be mistaken about
+    # whether it's a function definition.
+    for start_linenum in xrange(linenum,
+                                min(linenum+100, clean_lines.NumLines())):
+      start_line = lines[start_linenum]
+      joined_line += ' ' + start_line.lstrip()
+      if Search(r'(;|})', start_line):  # Declarations and trivial functions
+        body_found = True
+        break                              # ... ignore
+      elif Search(r'{', start_line):
+        body_found = True
+        function = Search(r'((\w|:)*)\(', line).group(1)
+        if Match(r'TEST', function):    # Handle TEST... macros
+          parameter_regexp = Search(r'(\(.*\))', joined_line)
+          if parameter_regexp:             # Ignore bad syntax
+            function += parameter_regexp.group(1)
+        else:
+          function += '()'
+        function_state.Begin(function)
+        break
+    if not body_found:
+      # 100 lines after finding a line deemed to start a function
+      # definition, no body for the function was found. A macro
+      # invocation with no terminating semicolon could trigger this.
+      error(filename, linenum, 'readability/fn_size', 5,
+            'Lint failed to find start of function body.')
+  elif Match(r'^\}\s*$', line):  # function end
+    if not Search(r'\bNOLINT\b', raw_line):
+      function_state.Check(error, filename, linenum)
+    function_state.End()
+  elif not Match(r'^\s*$', line):
+    function_state.Count()  # Count non-blank/non-comment lines.
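+
+# e.g. (assumed numbers): a 600-line TEST body at --v=0 exceeds the 400-line
+# test trigger and is reported under readability/fn_size; appending
+# '// NOLINT' to the function's closing '}' line suppresses the report.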
+
+
+_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
+
+
+def CheckComment(comment, filename, linenum, error):
+  """Checks for common mistakes in TODO comments.
+
+  Args:
+    comment: The text of the comment from the line in question.
+    filename: The name of the current file.
+    linenum: The number of the line to check.
+    error: The function to call with any errors found.
+  """
+  match = _RE_PATTERN_TODO.match(comment)
+  if match:
+    # One whitespace is correct; zero whitespace is handled elsewhere.
+    leading_whitespace = match.group(1)
+    if len(leading_whitespace) > 1:
+      error(filename, linenum, 'whitespace/todo', 2,
+            'Too many spaces before TODO')
+
+    username = match.group(2)
+    if not username:
+      error(filename, linenum, 'readability/todo', 2,
+            'Missing username in TODO; it should look like '
+            '"// TODO(my_username): Stuff."')
+
+    middle_whitespace = match.group(3)
+    if middle_whitespace != ' ' and middle_whitespace != '':
+      error(filename, linenum, 'whitespace/todo', 2,
+            'TODO(my_username) should be followed by a space')
+
+
+def CheckSpacing(filename, clean_lines, linenum, error):
+  """Checks for the correctness of various spacing issues in the code.
+
+  Things we check for: spaces around operators, spaces after
+  if/for/while/switch, no spaces around parens in function calls, two
+  spaces between code and comment, don't start a block with a blank
+  line, don't end a function with a blank line, don't have too many
+  blank lines in a row.
+
+  Args:
+    filename: The name of the current file.
+    clean_lines: A CleansedLines instance containing the file.
+    linenum: The number of the line to check.
+    error: The function to call with any errors found.
+  """
+
+  raw = clean_lines.raw_lines
+  line = raw[linenum]
+
+  # Before nixing comments, check if the line is blank for no good
+  # reason.  This includes the first line after a block is opened, and
+  # blank lines at the end of a function (ie, right before a line like '}'
+  if IsBlankLine(line):
+    elided = clean_lines.elided
+    prev_line = elided[linenum - 1]
+    prevbrace = prev_line.rfind('{')
+    # TODO(unknown): Don't complain if line before blank line, and line after,
+    #                both start with alnums and are indented the same amount.
+    #                This ignores whitespace at the start of a namespace block
+    #                because those are not usually indented.
+    if (prevbrace != -1 and prev_line[prevbrace:].find('}') == -1
+        and prev_line[:prevbrace].find('namespace') == -1):
+      # OK, we have a blank line at the start of a code block.  Before we
+      # complain, we check if it is an exception to the rule: The previous
+      # non-empty line has the parameters of a function header that are
+      # indented 4 spaces (because they did not fit in an 80 column line when
+      # placed on the same line as the function name).  We also check for the
+      # case where the previous line is indented 6 spaces, which may happen
+      # when the initializers of a constructor do not fit into an 80 column
+      # line.
+      exception = False
+      if Match(r' {6}\w', prev_line):  # Initializer list?
+        # We are looking for the opening column of initializer list, which
+        # should be indented 4 spaces to cause 6 space indentation afterwards.
+        search_position = linenum-2
+        while (search_position >= 0
+               and Match(r' {6}\w', elided[search_position])):
+          search_position -= 1
+        exception = (search_position >= 0
+                     and elided[search_position][:5] == '    :')
+      else:
+        # Search for the function arguments or an initializer list.  We use a
+        # simple heuristic here: If the line is indented 4 spaces; and we have a
+        # closing paren, without the opening paren, followed by an opening brace
+        # or colon (for initializer lists) we assume that it is the last line of
+        # a function header.  If we have a colon indented 4 spaces, it is an
+        # initializer list.
+        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
+                           prev_line)
+                     or Match(r' {4}:', prev_line))
+
+      if not exception:
+        error(filename, linenum, 'whitespace/blank_line', 2,
+              'Blank line at the start of a code block.  Is this needed?')
+    # This doesn't ignore whitespace at the end of a namespace block
+    # because that is too hard without pairing open/close braces;
+    # however, a special exception is made for namespace closing
+    # brackets which have a comment containing "namespace".
+    #
+    # Also, ignore blank lines at the end of a block in a long if-else
+    # chain, like this:
+    #   if (condition1) {
+    #     // Something followed by a blank line
+    #
+    #   } else if (condition2) {
+    #     // Something else
+    #   }
+    if linenum + 1 < clean_lines.NumLines():
+      next_line = raw[linenum + 1]
+      if (next_line
+          and Match(r'\s*}', next_line)
+          and next_line.find('namespace') == -1
+          and next_line.find('} else ') == -1):
+        error(filename, linenum, 'whitespace/blank_line', 3,
+              'Blank line at the end of a code block.  Is this needed?')
+
+  # Next, we complain if there's a comment too near the text
+  commentpos = line.find('//')
+  if commentpos != -1:
+    # Check if the // may be in quotes.  If so, ignore it
+    if (line.count('"', 0, commentpos) -
+        line.count('\\"', 0, commentpos)) % 2 == 0:   # not in quotes
+      # Allow one space for new scopes, two spaces otherwise:
+      if (not Match(r'^\s*{ //', line) and
+          ((commentpos >= 1 and
+            line[commentpos-1] not in string.whitespace) or
+           (commentpos >= 2 and
+            line[commentpos-2] not in string.whitespace))):
+        error(filename, linenum, 'whitespace/comments', 2,
+              'At least two spaces is best between code and comments')
+      # There should always be a space between the // and the comment
+      commentend = commentpos + 2
+      if commentend < len(line) and not line[commentend] == ' ':
+        # but some lines are exceptions -- e.g. if they're big
+        # comment delimiters like:
+        # //----------------------------------------------------------
+        match = Search(r'[=/-]{4,}\s*$', line[commentend:])
+        if not match:
+          error(filename, linenum, 'whitespace/comments', 4,
+                'Should have a space between // and comment')
+      CheckComment(line[commentpos:], filename, linenum, error)
+
+  line = clean_lines.elided[linenum]  # get rid of comments and strings
+
+  # Don't try to do spacing checks for operator methods
+  line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
+
+  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
+  # Otherwise not.  Note we only check for non-spaces on *both* sides;
+  # sometimes people put non-spaces on one side when aligning ='s among
+  # many lines (not that this is behavior that I approve of...)
+  if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
+    error(filename, linenum, 'whitespace/operators', 4,
+          'Missing spaces around =')
+
+  # It's ok not to have spaces around binary operators like + - * /, but if
+  # there's too little whitespace, we get concerned.  It's hard to tell,
+  # though, so we punt on this one for now.  TODO.
+
+  # You should always have whitespace around binary operators.
+  # Alas, we can't test < or > because they're legitimately used sans spaces
+  # (a->b, vector<int> a).  The only time we can tell is a < with no >, and
+  # only if it's not template params list spilling into the next line.
+  match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
+  if not match:
+    # Note that while it seems that the '<[^<]*' term in the following
+    # regexp could be simplified to '<.*', which would indeed match
+    # the same class of strings, the [^<] means that searching for the
+    # regexp takes linear rather than quadratic time.
+    if not Search(r'<[^<]*,\s*$', line):  # template params spill
+      match = Search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line)
+  if match:
+    error(filename, linenum, 'whitespace/operators', 3,
+          'Missing spaces around %s' % match.group(1))
+  # We allow no-spaces around << and >> when used like this: 10<<20, but
+  # not otherwise (particularly, not when used as streams)
+  match = Search(r'[^0-9\s](<<|>>)[^0-9\s]', line)
+  if match:
+    error(filename, linenum, 'whitespace/operators', 3,
+          'Missing spaces around %s' % match.group(1))
+
+  # There shouldn't be space around unary operators
+  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
+  if match:
+    error(filename, linenum, 'whitespace/operators', 4,
+          'Extra space for operator %s' % match.group(1))
+
+  # A pet peeve of mine: no spaces after an if, while, switch, or for
+  match = Search(r' (if\(|for\(|while\(|switch\()', line)
+  if match:
+    error(filename, linenum, 'whitespace/parens', 5,
+          'Missing space before ( in %s' % match.group(1))
+
+  # For if/for/while/switch, the left and right parens should be
+  # consistent about how many spaces are inside the parens, and
+  # there should either be zero or one spaces inside the parens.
+  # We don't want: "if ( foo)" or "if ( foo   )".
+  # Exception: "for ( ; foo; bar)" is allowed.
+  match = Search(r'\b(if|for|while|switch)\s*'
+                 r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
+                 line)
+  if match:
+    if len(match.group(2)) != len(match.group(4)):
+      if not (match.group(3) == ';' and
+              len(match.group(2)) == 1 + len(match.group(4))):
+        error(filename, linenum, 'whitespace/parens', 5,
+              'Mismatching spaces inside () in %s' % match.group(1))
+    if len(match.group(2)) not in [0, 1]:
+      error(filename, linenum, 'whitespace/parens', 5,
+            'Should have zero or one spaces inside ( and ) in %s' %
+            match.group(1))
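+  # For example, "if (foo) {" and "if ( foo ) {" pass, but "if ( foo) {"
+  # has mismatching spaces and "if (  foo  ) {" has too many.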
+
+  # You should always have a space after a comma (either as fn arg or operator)
+  if Search(r',[^\s]', line):
+    error(filename, linenum, 'whitespace/comma', 3,
+          'Missing space after ,')
+
+  # Next we will look for issues with function calls.
+  CheckSpacingForFunctionCall(filename, line, linenum, error)
+
+  # Except after an opening paren, you should have spaces before your braces.
+  # And since you should never have braces at the beginning of a line, this is
+  # an easy test.
+  if Search(r'[^ (]{', line):
+    error(filename, linenum, 'whitespace/braces', 5,
+          'Missing space before {')
+
+  # Make sure '} else {' has spaces.
+  if Search(r'}else', line):
+    error(filename, linenum, 'whitespace/braces', 5,
+          'Missing space before else')
+
+  # You shouldn't have spaces before your brackets, except maybe after
+  # 'delete []' or 'new char * []'.
+  if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
+    error(filename, linenum, 'whitespace/braces', 5,
+          'Extra space before [')
+
+  # You shouldn't have a space before a semicolon at the end of the line.
+  # There's a special case for "for" since the style guide allows space before
+  # the semicolon there.
+  if Search(r':\s*;\s*$', line):
+    error(filename, linenum, 'whitespace/semicolon', 5,
+          'Semicolon defining empty statement. Use { } instead.')
+  elif Search(r'^\s*;\s*$', line):
+    error(filename, linenum, 'whitespace/semicolon', 5,
+          'Line contains only semicolon. If this should be an empty statement, '
+          'use { } instead.')
+  elif (Search(r'\s+;\s*$', line) and
+        not Search(r'\bfor\b', line)):
+    error(filename, linenum, 'whitespace/semicolon', 5,
+          'Extra space before last semicolon. If this should be an empty '
+          'statement, use { } instead.')
+
+
+def GetPreviousNonBlankLine(clean_lines, linenum):
+  """Return the most recent non-blank line and its line number.
+
+  Args:
+    clean_lines: A CleansedLines instance containing the file contents.
+    linenum: The number of the line to check.
+
+  Returns:
+    A tuple with two elements.  The first element is the contents of the last
+    non-blank line before the current line, or the empty string if this is the
+    first non-blank line.  The second is the line number of that line, or -1
+    if this is the first non-blank line.
+  """
+
+  prevlinenum = linenum - 1
+  while prevlinenum >= 0:
+    prevline = clean_lines.elided[prevlinenum]
+    if not IsBlankLine(prevline):     # if not a blank line...
+      return (prevline, prevlinenum)
+    prevlinenum -= 1
+  return ('', -1)
+
+
+def CheckBraces(filename, clean_lines, linenum, error):
+  """Looks for misplaced braces (e.g. at the end of line).
+
+  Args:
+    filename: The name of the current file.
+    clean_lines: A CleansedLines instance containing the file.
+    linenum: The number of the line to check.
+    error: The function to call with any errors found.
+  """
+
+  line = clean_lines.elided[linenum]        # get rid of comments and strings
+
+  if Match(r'\s*{\s*$', line):
+    # We allow an open brace to start a line in the case where someone
+    # is using braces in a block to explicitly create a new scope,
+    # which is commonly used to control the lifetime of
+    # stack-allocated variables.  We don't detect this perfectly: we
+    # just don't complain if the last non-whitespace character on the
+    # previous non-blank line is ';', ':', '{', or '}'.
+    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
+    if not Search(r'[;:}{]\s*$', prevline):
+      error(filename, linenum, 'whitespace/braces', 4,
+            '{ should almost always be at the end of the previous line')
+
+  # An else clause should be on the same line as the preceding closing brace.
+  if Match(r'\s*else\s*', line):
+    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
+    if Match(r'\s*}\s*$', prevline):
+      error(filename, linenum, 'whitespace/newline', 4,
+            'An else should appear on the same line as the preceding }')
+
+  # If braces come on one side of an else, they should be on both.
+  # However, we have to worry about "else if" that spans multiple lines!
+  if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
+    if Search(r'}\s*else if([^{]*)$', line):       # could be multi-line if
+      # find the ( after the if
+      pos = line.find('else if')
+      pos = line.find('(', pos)
+      if pos > 0:
+        (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
+        if endline[endpos:].find('{') == -1:    # must be brace after if
+          error(filename, linenum, 'readability/braces', 5,
+                'If an else has a brace on one side, it should have it on both')
+    else:            # common case: else not followed by a multi-line if
+      error(filename, linenum, 'readability/braces', 5,
+            'If an else has a brace on one side, it should have it on both')
+
+  # Likewise, an else should never have the else clause on the same line
+  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
+    error(filename, linenum, 'whitespace/newline', 4,
+          'Else clause should never be on same line as else (use 2 lines)')
+
+  # In the same way, a do/while should never be on one line
+  if Match(r'\s*do [^\s{]', line):
+    error(filename, linenum, 'whitespace/newline', 4,
+          'do/while clauses should not be on a single line')
+
+  # Braces shouldn't be followed by a ; unless they're defining a struct
+  # or initializing an array.
+  # We can't tell in general, but we can for some common cases.
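+  # For example, "int a[] = {1, 2};" is an initializer and is fine, while
+  # "if (x) { Bar(); };" would be flagged for the trailing semicolon.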
+  prevlinenum = linenum
+  while True:
+    (prevline, prevlinenum) = GetPreviousNonBlankLine(clean_lines, prevlinenum)
+    if Match(r'\s+{.*}\s*;', line) and not prevline.count(';'):
+      line = prevline + line
+    else:
+      break
+  if (Search(r'{.*}\s*;', line) and
+      line.count('{') == line.count('}') and
+      not Search(r'struct|class|enum|\s*=\s*{', line)):
+    error(filename, linenum, 'readability/braces', 4,
+          "You don't need a ; after a }")
+
+
+def ReplaceableCheck(operator, macro, line):
+  """Determine whether a basic CHECK can be replaced with a more specific one.
+
+  For example suggest using CHECK_EQ instead of CHECK(a == b) and
+  similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.
+
+  Args:
+    operator: The C++ operator used in the CHECK.
+    macro: The CHECK or EXPECT macro being called.
+    line: The current source line.
+
+  Returns:
+    True if the CHECK can be replaced with a more specific one.
+  """
+
+  # This matches decimal and hex integers, strings, and chars (in that order).
+  match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'
+
+  # Expression to match two sides of the operator with something that
+  # looks like a literal, since CHECK(x == iterator) won't compile.
+  # This means we can't catch all the cases where a more specific
+  # CHECK is possible, but it's less annoying than dealing with
+  # extraneous warnings.
+  match_this = (r'\s*' + macro + r'\((\s*' +
+                match_constant + r'\s*' + operator + r'[^<>].*|'
+                r'.*[^<>]' + operator + r'\s*' + match_constant +
+                r'\s*\))')
+
+  # Don't complain about CHECK(x == NULL) or similar because
+  # CHECK_EQ(x, NULL) won't compile (requires a cast).
+  # Also, don't complain about more complex boolean expressions
+  # involving && or || such as CHECK(a == b || c == d).
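+  # For example, with operator '==' and macro 'CHECK', the line
+  # 'CHECK(x == 42);' matches (suggesting CHECK_EQ), but 'CHECK(x == y);'
+  # does not, since neither side is a literal.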
+  return Match(match_this, line) and not Search(r'NULL|&&|\|\|', line)
+
+
+def CheckCheck(filename, clean_lines, linenum, error):
+  """Checks the use of CHECK and EXPECT macros.
+
+  Args:
+    filename: The name of the current file.
+    clean_lines: A CleansedLines instance containing the file.
+    linenum: The number of the line to check.
+    error: The function to call with any errors found.
+  """
+
+  # Decide the set of replacement macros that should be suggested
+  raw_lines = clean_lines.raw_lines
+  current_macro = ''
+  for macro in _CHECK_MACROS:
+    if raw_lines[linenum].find(macro) >= 0:
+      current_macro = macro
+      break
+  if not current_macro:
+    # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
+    return
+
+  line = clean_lines.elided[linenum]        # get rid of comments and strings
+
+  # Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
+  for operator in ['==', '!=', '>=', '>', '<=', '<']:
+    if ReplaceableCheck(operator, current_macro, line):
+      error(filename, linenum, 'readability/check', 2,
+            'Consider using %s instead of %s(a %s b)' % (
+                _CHECK_REPLACEMENT[current_macro][operator],
+                current_macro, operator))
+      break
+
+
+def GetLineWidth(line):
+  """Determines the width of the line in column positions.
+
+  Args:
+    line: A string, which may be a Unicode string.
+
+  Returns:
+    The width of the line in column positions, accounting for Unicode
+    combining characters and wide characters.
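+    For example, a fullwidth CJK character counts as two columns, while a
+    combining accent contributes zero.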
+  """
+  if isinstance(line, unicode):
+    width = 0
+    for c in unicodedata.normalize('NFC', line):
+      if unicodedata.east_asian_width(c) in ('W', 'F'):
+        width += 2
+      elif not unicodedata.combining(c):
+        width += 1
+    return width
+  else:
+    return len(line)
+
+
+def CheckStyle(filename, clean_lines, linenum, file_extension, error):
+  """Checks rules from the 'C++ style rules' section of cppguide.html.
+
+  Most of these rules are hard to test (naming, comment style), but we
+  do what we can.  In particular we check for 2-space indents, line lengths,
+  tab usage, spaces inside code, etc.
+
+  Args:
+    filename: The name of the current file.
+    clean_lines: A CleansedLines instance containing the file.
+    linenum: The number of the line to check.
+    file_extension: The extension (without the dot) of the filename.
+    error: The function to call with any errors found.
+  """
+
+  raw_lines = clean_lines.raw_lines
+  line = raw_lines[linenum]
+
+  if line.find('\t') != -1:
+    error(filename, linenum, 'whitespace/tab', 1,
+          'Tab found; better to use spaces')
+
+  # One or three blank spaces at the beginning of the line is weird; it's
+  # hard to reconcile that with 2-space indents.
+  # NOTE: here are the conditions rob pike used for his tests.  Mine aren't
+  # as sophisticated, but it may be worth becoming so:  RLENGTH==initial_spaces
+  # if(RLENGTH > 20) complain = 0;
+  # if(match($0, " +(error|private|public|protected):")) complain = 0;
+  # if(match(prev, "&& *$")) complain = 0;
+  # if(match(prev, "\\|\\| *$")) complain = 0;
+  # if(match(prev, "[\",=><] *$")) complain = 0;
+  # if(match($0, " <<")) complain = 0;
+  # if(match(prev, " +for \\(")) complain = 0;
+  # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
+  initial_spaces = 0
+  cleansed_line = clean_lines.elided[linenum]
+  while initial_spaces < len(line) and line[initial_spaces] == ' ':
+    initial_spaces += 1
+  if line and line[-1].isspace():
+    error(filename, linenum, 'whitespace/end_of_line', 4,
+          'Line ends in whitespace.  Consider deleting these extra spaces.')
+  # There are certain situations we allow one space, notably for labels
+  elif ((initial_spaces == 1 or initial_spaces == 3) and
+        not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
+    error(filename, linenum, 'whitespace/indent', 3,
+          'Weird number of spaces at line-start.  '
+          'Are you using a 2-space indent?')
+  # Labels should always be indented at least one space.
+  elif not initial_spaces and line[:2] != '//' and Search(r'[^:]:\s*$',
+                                                          line):
+    error(filename, linenum, 'whitespace/labels', 4,
+          'Labels should always be indented at least one space.  '
+          'If this is a member-initializer list in a constructor, '
+          'the colon should be on the line after the definition header.')
+
+  # Check if the line is a header guard.
+  is_header_guard = False
+  if file_extension == 'h':
+    cppvar = GetHeaderGuardCPPVariable(filename)
+    if (line.startswith('#ifndef %s' % cppvar) or
+        line.startswith('#define %s' % cppvar) or
+        line.startswith('#endif  // %s' % cppvar)):
+      is_header_guard = True
+  # #include lines and header guards can be long, since there's no clean way to
+  # split them.
+  if not line.startswith('#include') and not is_header_guard:
+    line_width = GetLineWidth(line)
+    if line_width > 100:
+      error(filename, linenum, 'whitespace/line_length', 4,
+            'Lines should very rarely be longer than 100 characters')
+    elif line_width > 80:
+      error(filename, linenum, 'whitespace/line_length', 2,
+            'Lines should be <= 80 characters long')
+
+  if (cleansed_line.count(';') > 1 and
+      # for loops are allowed two ;'s (and may run over two lines).
+      cleansed_line.find('for') == -1 and
+      (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
+       GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
+      # It's ok to have many commands in a switch case that fits in 1 line
+      not ((cleansed_line.find('case ') != -1 or
+            cleansed_line.find('default:') != -1) and
+           cleansed_line.find('break;') != -1)):
+    error(filename, linenum, 'whitespace/newline', 4,
+          'More than one command on the same line')
+
+  # Some more style checks
+  CheckBraces(filename, clean_lines, linenum, error)
+  CheckSpacing(filename, clean_lines, linenum, error)
+  CheckCheck(filename, clean_lines, linenum, error)
+
+
+_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
+_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
+# Matches the first component of a filename delimited by -s and _s. That is:
+#  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
+#  _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
+#  _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
+#  _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
+_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
+
+
+def _DropCommonSuffixes(filename):
+  """Drops common suffixes like _test.cc or -inl.h from filename.
+
+  For example:
+    >>> _DropCommonSuffixes('foo/foo-inl.h')
+    'foo/foo'
+    >>> _DropCommonSuffixes('foo/bar/foo.cc')
+    'foo/bar/foo'
+    >>> _DropCommonSuffixes('foo/foo_internal.h')
+    'foo/foo'
+    >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
+    'foo/foo_unusualinternal'
+
+  Args:
+    filename: The input filename.
+
+  Returns:
+    The filename with the common suffix removed.
+  """
+  for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
+                 'inl.h', 'impl.h', 'internal.h'):
+    if (filename.endswith(suffix) and len(filename) > len(suffix) and
+        filename[-len(suffix) - 1] in ('-', '_')):
+      return filename[:-len(suffix) - 1]
+  return os.path.splitext(filename)[0]
+
+
+def _IsTestFilename(filename):
+  """Determines if the given filename has a suffix that identifies it as a test.
+
+  Args:
+    filename: The input filename.
+
+  Returns:
+    True if 'filename' looks like a test, False otherwise.
+  """
+  return (filename.endswith('_test.cc') or
+          filename.endswith('_unittest.cc') or
+          filename.endswith('_regtest.cc'))
+
+
+def _ClassifyInclude(fileinfo, include, is_system):
+  """Figures out what kind of header 'include' is.
+
+  Args:
+    fileinfo: The current file cpplint is running over. A FileInfo instance.
+    include: The path to a #included file.
+    is_system: True if the #include used <> rather than "".
+
+  Returns:
+    One of the _XXX_HEADER constants.
+
+  For example:
+    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
+    _C_SYS_HEADER
+    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
+    _CPP_SYS_HEADER
+    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
+    _LIKELY_MY_HEADER
+    >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
+    ...                  'bar/foo_other_ext.h', False)
+    _POSSIBLE_MY_HEADER
+    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
+    _OTHER_HEADER
+  """
+  # This is a list of all standard c++ header files, except
+  # those already checked for above.
+  is_stl_h = include in _STL_HEADERS
+  is_cpp_h = is_stl_h or include in _CPP_HEADERS
+
+  if is_system:
+    if is_cpp_h:
+      return _CPP_SYS_HEADER
+    else:
+      return _C_SYS_HEADER
+
+  # If the target file and the include we're checking share a
+  # basename when we drop common extensions, and the include
+  # lives in . , then it's likely to be owned by the target file.
+  target_dir, target_base = (
+      os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
+  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
+  if target_base == include_base and (
+      include_dir == target_dir or
+      include_dir == os.path.normpath(target_dir + '/../public')):
+    return _LIKELY_MY_HEADER
+
+  # If the target and include share some initial basename
+  # component, it's possible the target is implementing the
+  # include, so it's allowed to be first, but we'll never
+  # complain if it's not there.
+  target_first_component = _RE_FIRST_COMPONENT.match(target_base)
+  include_first_component = _RE_FIRST_COMPONENT.match(include_base)
+  if (target_first_component and include_first_component and
+      target_first_component.group(0) ==
+      include_first_component.group(0)):
+    return _POSSIBLE_MY_HEADER
+
+  return _OTHER_HEADER
+
+
+def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state,
+                  error):
+  """Checks rules from the 'C++ language rules' section of cppguide.html.
+
+  Some of these rules are hard to test (function overloading, using
+  uint32 inappropriately), but we do the best we can.
+
+  Args:
+    filename: The name of the current file.
+    clean_lines: A CleansedLines instance containing the file.
+    linenum: The number of the line to check.
+    file_extension: The extension (without the dot) of the filename.
+    include_state: An _IncludeState instance in which the headers are inserted.
+    error: The function to call with any errors found.
+  """
+  fileinfo = FileInfo(filename)
+
+  # get rid of comments
+  comment_elided_line = clean_lines.lines[linenum]
+
+  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
+  if _RE_PATTERN_INCLUDE_NEW_STYLE.search(comment_elided_line):
+    error(filename, linenum, 'build/include', 4,
+          'Include the directory when naming .h files')
+
+  # we shouldn't include a file more than once. actually, there are a
+  # handful of instances where doing so is okay, but in general it's
+  # not.
+  match = _RE_PATTERN_INCLUDE.search(comment_elided_line)
+  if match:
+    include = match.group(2)
+    is_system = (match.group(1) == '<')
+    if include in include_state:
+      error(filename, linenum, 'build/include', 4,
+            '"%s" already included at %s:%s' %
+            (include, filename, include_state[include]))
+    else:
+      include_state[include] = linenum
+
+      # We want to ensure that headers appear in the right order:
+      # 1) for foo.cc, foo.h  (preferred location)
+      # 2) c system files
+      # 3) cpp system files
+      # 4) for foo.cc, foo.h  (deprecated location)
+      # 5) other google headers
+      #
+      # We classify each include statement as one of those 5 types
+      # using a number of techniques. The include_state object keeps
+      # track of the highest type seen, and complains if we see a
+      # lower type after that.
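+      # For example, in foo.cc the expected order is "foo.h", then
+      # <stdio.h>-style C headers, then <string>-style C++ headers, then
+      # other project headers such as "bar/baz.h".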
+      error_message = include_state.CheckNextIncludeOrder(
+          _ClassifyInclude(fileinfo, include, is_system))
+      if error_message:
+        error(filename, linenum, 'build/include_order', 4,
+              '%s. Should be: %s.h, c system, c++ system, other.' %
+              (error_message, fileinfo.BaseName()))
+
+  # If the line is empty or consists entirely of a comment, there's no need
+  # to check it.
+  line = clean_lines.elided[linenum]
+  if not line:
+    return
+
+  # Create an extended_line, which is the concatenation of the current and
+  # next lines, for more effective checking of code that may span more than one
+  # line.
+  if linenum + 1 < clean_lines.NumLines():
+    extended_line = line + clean_lines.elided[linenum + 1]
+  else:
+    extended_line = line
+
+  # Make Windows paths like Unix.
+  fullname = os.path.abspath(filename).replace('\\', '/')
+
+  # TODO(unknown): figure out if they're using default arguments in fn proto.
+
+  # Look for any of the stream classes that are part of standard C++.
+  match = _RE_PATTERN_INCLUDE.match(line)
+  if match:
+    include = match.group(2)
+    if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
+      # Many unit tests use cout, so we exempt them.
+      if not _IsTestFilename(filename):
+        error(filename, linenum, 'readability/streams', 3,
+              'Streams are highly discouraged.')
+
+  # Check for non-const references in functions.  This is tricky because &
+  # is also used to take the address of something.  We allow <> for templates,
+  # (ignoring whatever is between the braces) and : for classes.
+  # These are complicated re's.  They try to capture the following:
+  # paren (for fn-prototype start), typename, &, varname.  For the const
+  # version, we're willing for const to be before typename or after
+  # Don't check the implementation on the same line.
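+  # For example, "void Foo(string& s)" is flagged, while
+  # "void Foo(const string& s)" and "void Foo(string* s)" are not.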
+  fnline = line.split('{', 1)[0]
+  if (len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) >
+      len(re.findall(r'\([^()]*\bconst\s+(?:typename\s+)?(?:struct\s+)?'
+                     r'(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) +
+      len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+\s+const(\s?&|&\s?)[\w]+',
+                     fnline))):
+
+    # We allow non-const references in a few standard places, like functions
+    # called "swap()" or iostream operators like "<<" or ">>".
+    if not Search(
+        r'(swap|Swap|operator[<>][<>])\s*\(\s*(?:[\w:]|<.*>)+\s*&',
+        fnline):
+      error(filename, linenum, 'runtime/references', 2,
+            'Is this a non-const reference? '
+            'If so, make const or use a pointer.')
+
+  # Check to see if they're using a conversion function cast.
+  # I just try to capture the most common basic types, though there are more.
+  # Parameterless conversion functions, such as bool(), are allowed as they are
+  # probably a member operator declaration or default constructor.
+  match = Search(
+      r'\b(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
+  if match:
+    # gMock methods are defined using some variant of MOCK_METHODx(name, type)
+    # where type may be float(), int(string), etc.  Without context they are
+    # virtually indistinguishable from int(x) casts.
+    if not Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line):
+      error(filename, linenum, 'readability/casting', 4,
+            'Using deprecated casting style.  '
+            'Use static_cast<%s>(...) instead' %
+            match.group(1))
+
+  CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
+                  'static_cast',
+                  r'\((int|float|double|bool|char|u?int(16|32|64))\)',
+                  error)
+  # This doesn't catch all cases.  Consider (const char * const)"hello".
+  CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
+                  'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
+
+  # In addition, we look for people taking the address of a cast.  This
+  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
+  # point where you think.
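+  # For example, "&(int*)foo" and "&static_cast<Foo*>(bar)" are both
+  # flagged, since each takes the address of a cast's temporary result.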
+  if Search(
+      r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
+    error(filename, linenum, 'runtime/casting', 4,
+          ('Are you taking an address of a cast?  '
+           'This is dangerous: could be a temp var.  '
+           'Take the address before doing the cast, rather than after'))
+
+  # Check for people declaring static/global STL strings at the top level.
+  # This is dangerous because the C++ language does not guarantee that
+  # globals with constructors are initialized before the first access.
+  match = Match(
+      r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
+      line)
+  # Make sure it's not a function.
+  # Function template specialization looks like: "string foo<Type>(...".
+  # Class template definitions look like: "string Foo<Type>::Method(...".
+  if match and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
+                         match.group(3)):
+    error(filename, linenum, 'runtime/string', 4,
+          'For a static/global string constant, use a C style string instead: '
+          '"%schar %s[]".' %
+          (match.group(1), match.group(2)))
+
+  # Check that we're not using RTTI outside of testing code.
+  if Search(r'\bdynamic_cast<', line) and not _IsTestFilename(filename):
+    error(filename, linenum, 'runtime/rtti', 5,
+          'Do not use dynamic_cast<>.  If you need to cast within a class '
+          "hierarchy, use static_cast<> to upcast.  Google doesn't support "
+          'RTTI.')
+
+  if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
+    error(filename, linenum, 'runtime/init', 4,
+          'You seem to be initializing a member variable with itself.')
+
+  if file_extension == 'h':
+    # TODO(unknown): check that 1-arg constructors are explicit.
+    #                How to tell it's a constructor?
+    #                (handled in CheckForNonStandardConstructs for now)
+    # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
+    #                (level 1 error)
+    pass
+
+  # Check if people are using the verboten C basic types.  The only exception
+  # we regularly allow is "unsigned short port" for port.
+  if Search(r'\bshort port\b', line):
+    if not Search(r'\bunsigned short port\b', line):
+      error(filename, linenum, 'runtime/int', 4,
+            'Use "unsigned short" for ports, not "short"')
+  else:
+    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
+    if match:
+      error(filename, linenum, 'runtime/int', 4,
+            'Use int16/int64/etc, rather than the C type %s' % match.group(1))
+
+  # When snprintf is used, the second argument shouldn't be a literal.
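+  # For example, 'snprintf(buf, 10, "%s", s)' is flagged, while
+  # 'snprintf(buf, sizeof(buf), "%s", s)' is not.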
+  match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
+  if match:
+    error(filename, linenum, 'runtime/printf', 3,
+          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
+          'to snprintf.' % (match.group(1), match.group(2)))
+
+  # Check if some verboten C functions are being used.
+  if Search(r'\bsprintf\b', line):
+    error(filename, linenum, 'runtime/printf', 5,
+          'Never use sprintf.  Use snprintf instead.')
+  match = Search(r'\b(strcpy|strcat)\b', line)
+  if match:
+    error(filename, linenum, 'runtime/printf', 4,
+          'Almost always, snprintf is better than %s' % match.group(1))
+
+  if Search(r'\bsscanf\b', line):
+    error(filename, linenum, 'runtime/printf', 1,
+          'sscanf can be ok, but is slow and can overflow buffers.')
+
+  # Check for suspicious usage of "if" like
+  # } if (a == b) {
+  if Search(r'\}\s*if\s*\(', line):
+    error(filename, linenum, 'readability/braces', 4,
+          'Did you mean "else if"? If not, start a new line for "if".')
+
+  # Check for potential format string bugs like printf(foo).
+  # We constrain the pattern not to pick things like DocidForPrintf(foo).
+  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
+  match = re.search(r'\b((?:string)?printf)\s*\(([\w.\->()]+)\)', line, re.I)
+  if match:
+    error(filename, linenum, 'runtime/printf', 4,
+          'Potential format string bug. Do %s("%%s", %s) instead.'
+          % (match.group(1), match.group(2)))
+
+  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
+  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
+  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
+    error(filename, linenum, 'runtime/memset', 4,
+          'Did you mean "memset(%s, 0, %s)"?'
+          % (match.group(1), match.group(2)))
+
+  if Search(r'\busing namespace\b', line):
+    error(filename, linenum, 'build/namespaces', 5,
+          'Do not use namespace using-directives.  '
+          'Use using-declarations instead.')
+
+  # Detect variable-length arrays.
+  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
+  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
+      match.group(3).find(']') == -1):
+    # Split the size using space and arithmetic operators as delimiters.
+    # If any of the resulting tokens are not compile time constants then
+    # report the error.
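+    # For example, "int arr[n];" is flagged, while "int arr[kMaxSize];" and
+    # "char buf[sizeof(Foo)];" are accepted as compile-time constant sizes.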
+    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
+    is_const = True
+    skip_next = False
+    for tok in tokens:
+      if skip_next:
+        skip_next = False
+        continue
+
+      if Search(r'sizeof\(.+\)', tok): continue
+      if Search(r'arraysize\(\w+\)', tok): continue
+
+      tok = tok.lstrip('(')
+      tok = tok.rstrip(')')
+      if not tok: continue
+      if Match(r'\d+', tok): continue
+      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
+      if Match(r'k[A-Z0-9]\w*', tok): continue
+      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
+      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
+      # A catch-all for tricky sizeof cases, including 'sizeof expression',
+      # 'sizeof(*type)', 'sizeof(const type)', and 'sizeof(struct StructName)';
+      # these require skipping the next token because we split on ' ' and '*'.
+      if tok.startswith('sizeof'):
+        skip_next = True
+        continue
+      is_const = False
+      break
+    if not is_const:
+      error(filename, linenum, 'runtime/arrays', 1,
+            'Do not use variable-length arrays.  Use an appropriately named '
+            "('k' followed by CamelCase) compile-time constant for the size.")
+
+  # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
+  # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
+  # in the class declaration.
+  match = Match(
+      (r'\s*'
+       r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
+       r'\(.*\);$'),
+      line)
+  if match and linenum + 1 < clean_lines.NumLines():
+    next_line = clean_lines.elided[linenum + 1]
+    if not Search(r'^\s*};', next_line):
+      error(filename, linenum, 'readability/constructors', 3,
+            match.group(1) + ' should be the last thing in the class')
+
+  # Check for use of unnamed namespaces in header files.  Registration
+  # macros are typically OK, so we allow use of "namespace {" on lines
+  # that end with backslashes.
+  if (file_extension == 'h'
+      and Search(r'\bnamespace\s*{', line)
+      and line[-1] != '\\'):
+    error(filename, linenum, 'build/namespaces', 4,
+          'Do not use unnamed namespaces in header files.  See '
+          'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
+          ' for more information.')
+
+
+def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
+                    error):
+  """Checks for a C-style cast by looking for the pattern.
+
+  This also handles sizeof(type) warnings, due to similarity of content.
+
+  Args:
+    filename: The name of the current file.
+    linenum: The number of the line to check.
+    line: The line of code to check.
+    raw_line: The raw line of code to check, with comments.
+    cast_type: The string for the C++ cast to recommend.  This is either
+      reinterpret_cast or static_cast, depending.
+    pattern: The regular expression used to find C-style casts.
+    error: The function to call with any errors found.
+  """
+  match = Search(pattern, line)
+  if not match:
+    return
+
+  # e.g., sizeof(int)
+  sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
+  if sizeof_match:
+    error(filename, linenum, 'runtime/sizeof', 1,
+          'Using sizeof(type).  Use sizeof(varname) instead if possible')
+    return
+
+  remainder = line[match.end(0):]
+
+  # The close paren is for function pointers as arguments to a function.
+  # eg, void foo(void (*bar)(int));
+  # The semicolon check is a more basic function check; also possibly a
+  # function pointer typedef.
+  # eg, void foo(int); or void foo(int) const;
+  # The equals check is for function pointer assignment.
+  # eg, void *(*foo)(int) = ...
+  #
+  # Right now, this will only catch cases where there's a single argument, and
+  # it's unnamed.  It should probably be expanded to check for multiple
+  # arguments with some unnamed.
+  function_match = Match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)))', remainder)
+  if function_match:
+    if (not function_match.group(3) or
+        function_match.group(3) == ';' or
+        raw_line.find('/*') < 0):
+      error(filename, linenum, 'readability/function', 3,
+            'All parameters should be named in a function')
+    return
+
+  # At this point, all that should be left is actual casts.
+  error(filename, linenum, 'readability/casting', 4,
+        'Using C-style cast.  Use %s<%s>(...) instead' %
+        (cast_type, match.group(1)))
+
+
+_HEADERS_CONTAINING_TEMPLATES = (
+    ('<deque>', ('deque',)),
+    ('<functional>', ('unary_function', 'binary_function',
+                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
+                      'negate',
+                      'equal_to', 'not_equal_to', 'greater', 'less',
+                      'greater_equal', 'less_equal',
+                      'logical_and', 'logical_or', 'logical_not',
+                      'unary_negate', 'not1', 'binary_negate', 'not2',
+                      'bind1st', 'bind2nd',
+                      'pointer_to_unary_function',
+                      'pointer_to_binary_function',
+                      'ptr_fun',
+                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
+                      'mem_fun_ref_t',
+                      'const_mem_fun_t', 'const_mem_fun1_t',
+                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
+                      'mem_fun_ref',
+                     )),
+    ('<limits>', ('numeric_limits',)),
+    ('<list>', ('list',)),
+    ('<map>', ('map', 'multimap',)),
+    ('<memory>', ('allocator',)),
+    ('<queue>', ('queue', 'priority_queue',)),
+    ('<set>', ('set', 'multiset',)),
+    ('<stack>', ('stack',)),
+    ('<string>', ('char_traits', 'basic_string',)),
+    ('<utility>', ('pair',)),
+    ('<vector>', ('vector',)),
+
+    # gcc extensions.
+    # Note: std::hash is their hash, ::hash is our hash
+    ('<hash_map>', ('hash_map', 'hash_multimap',)),
+    ('<hash_set>', ('hash_set', 'hash_multiset',)),
+    ('<slist>', ('slist',)),
+    )
+
+_HEADERS_ACCEPTED_BUT_NOT_PROMOTED = {
+    # We can trust with reasonable confidence that map gives us pair<>, too.
+    'pair<>': ('map', 'multimap', 'hash_map', 'hash_multimap')
+}
+
+_RE_PATTERN_STRING = re.compile(r'\bstring\b')
+
+_re_pattern_algorithm_header = []
+for _template in ('copy', 'max', 'min', 'sort', 'swap'):
+  # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
+  # type::max().
+  _re_pattern_algorithm_header.append(
+      (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
+       _template,
+       '<algorithm>'))
+
+_re_pattern_templates = []
+for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
+  for _template in _templates:
+    _re_pattern_templates.append(
+        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
+         _template + '<>',
+         _header))
+
+
+def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error):
+  """Reports for missing stl includes.
+
+  This function will output warnings to make sure you are including the headers
+  necessary for the stl containers and functions that you use. We only give one
+  reason to include a header. For example, if you use both equal_to<> and
+  less<> in a .h file, only one (the latter in the file) of these will be
+  reported as a reason to include the <functional>.
+
+  We only check headers. We do not check inside cc-files. .cc files should be
+  able to depend on their respective header files for includes.  However, there
+  is no simple way of producing this logic here.
+
+  Args:
+    filename: The name of the current file.
+    clean_lines: A CleansedLines instance containing the file.
+    include_state: An _IncludeState instance.
+    error: The function to call with any errors found.
+  """
+  if filename.endswith('.cc'):
+    return
+
+  required = {}  # A map of header name to linenumber and the template entity.
+                 # Example of required: { '<functional>': (1219, 'less<>') }
+
+  for linenum in xrange(clean_lines.NumLines()):
+    line = clean_lines.elided[linenum]
+    if not line or line[0] == '#':
+      continue
+
+    # String is special -- it is a non-templatized type in STL.
+    if _RE_PATTERN_STRING.search(line):
+      required['<string>'] = (linenum, 'string')
+
+    for pattern, template, header in _re_pattern_algorithm_header:
+      if pattern.search(line):
+        required[header] = (linenum, template)
+
+    # The following check is just a speed-up; no semantics are changed.
+    if '<' not in line:  # Skip lines that cannot contain template usage.
+      continue
+
+    for pattern, template, header in _re_pattern_templates:
+      if pattern.search(line):
+        required[header] = (linenum, template)
+
+  # All the lines have been processed, report the errors found.
+  for required_header_unstripped in required:
+    template = required[required_header_unstripped][1]
+    if template in _HEADERS_ACCEPTED_BUT_NOT_PROMOTED:
+      headers = _HEADERS_ACCEPTED_BUT_NOT_PROMOTED[template]
+      if [True for header in headers if header in include_state]:
+        continue
+    if required_header_unstripped.strip('<>"') not in include_state:
+      error(filename, required[required_header_unstripped][0],
+            'build/include_what_you_use', 4,
+            'Add #include ' + required_header_unstripped + ' for ' + template)
+
+
+def ProcessLine(filename, file_extension,
+                clean_lines, line, include_state, function_state,
+                class_state, error):
+  """Processes a single line in the file.
+
+  Args:
+    filename: Filename of the file that is being processed.
+    file_extension: The extension (dot not included) of the file.
+    clean_lines: An array of strings, each representing a line of the file,
+                 with comments stripped.
+    line: Number of line being processed.
+    include_state: An _IncludeState instance in which the headers are inserted.
+    function_state: A _FunctionState instance which counts function lines, etc.
+    class_state: A _ClassState instance which maintains information about
+                 the current stack of nested class declarations being parsed.
+    error: A callable to which errors are reported, which takes 4 arguments:
+           filename, line number, error level, and message
+
+  """
+  raw_lines = clean_lines.raw_lines
+  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
+  if Search(r'\bNOLINT\b', raw_lines[line]):  # ignore nolint lines
+    return
+  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
+  CheckStyle(filename, clean_lines, line, file_extension, error)
+  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
+                error)
+  CheckForNonStandardConstructs(filename, clean_lines, line,
+                                class_state, error)
+  CheckPosixThreading(filename, clean_lines, line, error)
+
+
+def ProcessFileData(filename, file_extension, lines, error):
+  """Performs lint checks and reports any errors to the given error function.
+
+  Args:
+    filename: Filename of the file that is being processed.
+    file_extension: The extension (dot not included) of the file.
+    lines: An array of strings, each representing a line of the file, with the
+           last element being empty if the file is terminated with a newline.
+    error: A callable to which errors are reported, which takes 4 arguments:
+           filename, line number, error level, and message.
+  """
+  lines = (['// marker so line numbers and indices both start at 1'] + lines +
+           ['// marker so line numbers end in a known way'])
+
+  include_state = _IncludeState()
+  function_state = _FunctionState()
+  class_state = _ClassState()
+
+  CheckForCopyright(filename, lines, error)
+
+  if file_extension == 'h':
+    CheckForHeaderGuard(filename, lines, error)
+
+  RemoveMultiLineComments(filename, lines, error)
+  clean_lines = CleansedLines(lines)
+  for line in xrange(clean_lines.NumLines()):
+    ProcessLine(filename, file_extension, clean_lines, line,
+                include_state, function_state, class_state, error)
+  class_state.CheckFinished(filename, error)
+
+  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
+
+  # We check here rather than inside ProcessLine so that we see raw
+  # lines rather than "cleaned" lines.
+  CheckForUnicodeReplacementCharacters(filename, lines, error)
+
+  CheckForNewlineAtEOF(filename, lines, error)
+
+
+def ProcessFile(filename, vlevel):
+  """Does google-lint on a single file.
+
+  Args:
+    filename: The name of the file to parse.
+
+    vlevel: The level of errors to report.  Every error of confidence
+    >= verbose_level will be reported.  0 is a good default.
+  """
+
+  _SetVerboseLevel(vlevel)
+
+  try:
+    # Support the UNIX convention of using "-" for stdin.  Note that
+    # we are not opening the file with universal newline support
+    # (which codecs doesn't support anyway), so the resulting lines do
+    # contain trailing '\r' characters if we are reading a file that
+    # has CRLF endings.
+    # If after the split a trailing '\r' is present, it is removed
+    # below.  If it is not expected to be present (i.e. when os.linesep is
+    # not '\r\n', unlike on Windows), a warning is issued below when this
+    # file is processed.
+
+    if filename == '-':
+      lines = codecs.StreamReaderWriter(sys.stdin,
+                                        codecs.getreader('utf8'),
+                                        codecs.getwriter('utf8'),
+                                        'replace').read().split('\n')
+    else:
+      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
+
+    carriage_return_found = False
+    # Remove trailing '\r'.
+    for linenum in range(len(lines)):
+      if lines[linenum].endswith('\r'):
+        lines[linenum] = lines[linenum].rstrip('\r')
+        carriage_return_found = True
+
+  except IOError:
+    sys.stderr.write(
+        "Skipping input '%s': Can't open for reading\n" % filename)
+    return
+
+  # Note, if no dot is found, this will give the entire filename as the ext.
+  file_extension = filename[filename.rfind('.') + 1:]
+
+  # When reading from stdin, the extension is unknown, so no cpplint tests
+  # should rely on the extension.
+  if (filename != '-' and file_extension != 'cc' and file_extension != 'h'
+      and file_extension != 'cpp'):
+    sys.stderr.write('Ignoring %s; not a .cc or .h file\n' % filename)
+  else:
+    ProcessFileData(filename, file_extension, lines, Error)
+    if carriage_return_found and os.linesep != '\r\n':
+      # Use 0 for linenum since we are outputting only one error for
+      # potentially several lines.
+      Error(filename, 0, 'whitespace/newline', 1,
+            'One or more unexpected \\r (^M) found; '
+            'better to use only a \\n')
+
+  sys.stderr.write('Done processing %s\n' % filename)
+
+
+def PrintUsage(message):
+  """Prints a brief usage string and exits, optionally with an error message.
+
+  Args:
+    message: The optional error message.
+  """
+  sys.stderr.write(_USAGE)
+  if message:
+    sys.exit('\nFATAL ERROR: ' + message)
+  else:
+    sys.exit(1)
+
+
+def PrintCategories():
+  """Prints a list of all the error-categories used by error messages.
+
+  These are the categories used to filter messages via --filter.
+  """
+  sys.stderr.write(_ERROR_CATEGORIES)
+  sys.exit(0)
+
+
+def ParseArguments(args):
+  """Parses the command line arguments.
+
+  This may set the output format and verbosity level as side-effects.
+
+  Args:
+    args: The command line arguments:
+
+  Returns:
+    The list of filenames to lint.
+  """
+  try:
+    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
+                                                 'filter='])
+  except getopt.GetoptError:
+    PrintUsage('Invalid arguments.')
+
+  verbosity = _VerboseLevel()
+  output_format = _OutputFormat()
+  filters = ''
+
+  for (opt, val) in opts:
+    if opt == '--help':
+      PrintUsage(None)
+    elif opt == '--output':
+      if val not in ('emacs', 'vs7'):
+        PrintUsage('The only allowed output formats are emacs and vs7.')
+      output_format = val
+    elif opt == '--verbose':
+      verbosity = int(val)
+    elif opt == '--filter':
+      filters = val
+      if filters == '':
+        PrintCategories()
+
+  if not filenames:
+    PrintUsage('No files were specified.')
+
+  _SetOutputFormat(output_format)
+  _SetVerboseLevel(verbosity)
+  _SetFilters(filters)
+
+  return filenames
+
+
+def main():
+  filenames = ParseArguments(sys.argv[1:])
+
+  # Change stderr to write with replacement characters so we don't die
+  # if we try to print something containing non-ASCII characters.
+  sys.stderr = codecs.StreamReaderWriter(sys.stderr,
+                                         codecs.getreader('utf8'),
+                                         codecs.getwriter('utf8'),
+                                         'replace')
+
+  _cpplint_state.ResetErrorCount()
+  for filename in filenames:
+    ProcessFile(filename, _cpplint_state.verbose_level)
+  sys.stderr.write('Total errors found: %d\n' % _cpplint_state.error_count)
+  sys.exit(_cpplint_state.error_count > 0)
+
+
+if __name__ == '__main__':
+  main()

+ 5 - 0
gcl

@@ -0,0 +1,5 @@
+#!/bin/sh
+
+base_dir=$(dirname "$0")
+
+exec python "$base_dir/gcl.py" "$@"

+ 1 - 0
gcl.bat

@@ -0,0 +1 @@
+@call python "%~dp0gcl.py" %*

+ 1122 - 0
gcl.py

@@ -0,0 +1,1122 @@
+#!/usr/bin/python
+# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Wrapper script around Rietveld's upload.py that groups files into
+# changelists.
+
+import getpass
+import os
+import random
+import re
+import string
+import subprocess
+import sys
+import tempfile
+import time
+import upload
+import urllib2
+
+CODEREVIEW_SETTINGS = {
+  # Default values.
+  "CODE_REVIEW_SERVER": "codereview.chromium.org",
+  "CC_LIST": "chromium-reviews@googlegroups.com",
+  "VIEW_VC": "http://src.chromium.org/viewvc/chrome?view=rev&revision=",
+}
+
+# Use a shell for subcommands on Windows to get a PATH search, and because svn
+# may be a batch file.
+use_shell = sys.platform.startswith("win")
+
+# globals that store the root of the current repository and the directory where
+# we store information about changelists.
+repository_root = ""
+gcl_info_dir = ""
+
+# Filename where we store repository specific information for gcl.
+CODEREVIEW_SETTINGS_FILE = "codereview.settings"
+
+# Warning message when the change appears to be missing tests.
+MISSING_TEST_MSG = "Change contains new or modified methods, but no new tests!"
+
+# Global cache of files cached in GetInfoDir().
+FILES_CACHE = {}
+
+
+def IsSVNMoved(filename):
+  """Determine if a file has been added through svn mv"""
+  info = GetSVNFileInfo(filename)
+  return (info.get('Copied From URL') and
+          info.get('Copied From Rev') and
+          info.get('Schedule') == 'add')
+
+
+def GetSVNFileInfo(file):
+  """Returns a dictionary from the svn info output for the given file."""
+  output = RunShell(["svn", "info", file])
+  result = {}
+  re_key_value_pair = re.compile(r'^(.*): (.*)$')
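+  # For example, the svn info line "Repository Root: svn://svn.chromium.org"
+  # yields result["Repository Root"] == "svn://svn.chromium.org".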
+  for line in output.splitlines():
+    key_value_pair = re_key_value_pair.match(line)
+    if key_value_pair:
+      result[key_value_pair.group(1)] = key_value_pair.group(2)
+  return result
+
+
+def GetSVNFileProperty(file, property_name):
+  """Returns the value of an SVN property for the given file.
+
+  Args:
+    file: The file to check
+    property_name: The name of the SVN property, e.g. "svn:mime-type"
+
+  Returns:
+    The value of the property, which will be the empty string if the property
+    is not set on the file.  If the file is not under version control, the
+    empty string is also returned.
+  """
+  output = RunShell(["svn", "propget", property_name, file])
+  if (output.startswith("svn: ") and
+      output.endswith("is not under version control")):
+    return ""
+  else:
+    return output
+
+
+def GetRepositoryRoot():
+  """Returns the top level directory of the current repository.
+
+  The directory is returned as an absolute path.
+  """
+  global repository_root
+  if not repository_root:
+    cur_dir_repo_root = GetSVNFileInfo(os.getcwd()).get("Repository Root")
+    if not cur_dir_repo_root:
+      raise Exception("gcl run outside of repository")
+
+    repository_root = os.getcwd()
+    while True:
+      parent = os.path.dirname(repository_root)
+      if GetSVNFileInfo(parent).get("Repository Root") != cur_dir_repo_root:
+        break
+      repository_root = parent
+  return repository_root
+
+
+def GetInfoDir():
+  """Returns the directory where gcl info files are stored."""
+  global gcl_info_dir
+  if not gcl_info_dir:
+    gcl_info_dir = os.path.join(GetRepositoryRoot(), '.svn', 'gcl_info')
+  return gcl_info_dir
+
+
+def GetCachedFile(filename, max_age=60*60*24*3, use_root=False):
+  """Retrieves a file from the repository and caches it in GetInfoDir() for
+  max_age seconds.
+
+  use_root: If False, walk up the directory tree for the first match;
+            otherwise, go directly to the repository root.
+  """
+  global FILES_CACHE
+  if filename not in FILES_CACHE:
+    # Don't try to look up twice.
+    FILES_CACHE[filename] = None
+    # First we check if we have a cached version.
+    cached_file = os.path.join(GetInfoDir(), filename)
+    if (not os.path.exists(cached_file) or
+        time.time() - os.stat(cached_file).st_mtime > max_age):
+      dir_info = GetSVNFileInfo(".")
+      repo_root = dir_info["Repository Root"]
+      if use_root:
+        url_path = repo_root
+      else:
+        url_path = dir_info["URL"]
+      content = ""
+      while True:
+        # Look for the codereview.settings file at the current level.
+        svn_path = url_path + "/" + filename
+        content, rc = RunShellWithReturnCode(["svn", "cat", svn_path])
+        if not rc:
+          # The file was found; exit the loop with content holding its text.
+          break
+        # Make sure to mark settings as empty if not found.
+        content = ""
+        if url_path == repo_root:
+          # Reached the root. Abandoning search.
+          break
+        # Go up one level to try again.
+        url_path = os.path.dirname(url_path)
+      # Write a cached version even if there isn't a file, so we don't try to
+      # fetch it each time.
+      WriteFile(cached_file, content)
+    else:
+      content = ReadFile(cached_file)
+    FILES_CACHE[filename] = content
+  return FILES_CACHE[filename]
+
+
+def GetCodeReviewSetting(key):
+  """Returns a value for the given key for this repository."""
+  # Use '__just_initialized' as a flag to determine if the settings were
+  # already initialized.
+  if '__just_initialized' not in CODEREVIEW_SETTINGS:
+    for line in GetCachedFile(CODEREVIEW_SETTINGS_FILE).splitlines():
+      if not line or line.startswith("#"):
+        continue
+      k, v = line.split(": ", 1)
+      CODEREVIEW_SETTINGS[k] = v
+    CODEREVIEW_SETTINGS.setdefault('__just_initialized', None)
+  return CODEREVIEW_SETTINGS.get(key, "")
+
+
+def IsTreeOpen():
+  """Fetches the tree status and returns either True or False."""
+  url = GetCodeReviewSetting('STATUS')
+  status = ""
+  if url:
+    status = urllib2.urlopen(url).read()
+  return status.find('0') == -1
+
+
+def Warn(msg):
+  ErrorExit(msg, exit=False)
+
+
+def ErrorExit(msg, exit=True):
+  """Print an error message to stderr and optionally exit."""
+  print >>sys.stderr, msg
+  if exit:
+    sys.exit(1)
+
+
+def RunShellWithReturnCode(command, print_output=False):
+  """Executes a command and returns the output and the return code."""
+  p = subprocess.Popen(command, stdout=subprocess.PIPE,
+                       stderr=subprocess.STDOUT, shell=use_shell,
+                       universal_newlines=True)
+  if print_output:
+    output_array = []
+    while True:
+      line = p.stdout.readline()
+      if not line:
+        break
+      print line.strip('\n')
+      output_array.append(line)
+    output = "".join(output_array)
+  else:
+    output = p.stdout.read()
+  p.wait()
+  p.stdout.close()
+  return output, p.returncode
+
+
+def RunShell(command, print_output=False):
+  """Executes a command and returns the output."""
+  return RunShellWithReturnCode(command, print_output)[0]
+
+
+def ReadFile(filename):
+  """Returns the contents of a file."""
+  file = open(filename, 'r')
+  result = file.read()
+  file.close()
+  return result
+
+
+def WriteFile(filename, contents):
+  """Overwrites the file with the given contents."""
+  file = open(filename, 'w')
+  file.write(contents)
+  file.close()
+
+
+class ChangeInfo:
+  """Holds information about a changelist.
+
+    issue: the Rietveld issue number, or "" if it hasn't been uploaded yet.
+    description: the description.
+    files: a list of 2-tuples of (status, filename) for changed files,
+           with paths relative to the top repository directory.
+  """
+  def __init__(self, name="", issue="", description="", files=None):
+    self.name = name
+    self.issue = issue
+    self.description = description
+    self.files = files or []
+    self.patch = None
+
+  def FileList(self):
+    """Returns a list of files."""
+    return [file[1] for file in self.files]
+
+  def _NonDeletedFileList(self):
+    """Returns a list of files in this change, not including deleted files."""
+    return [file[1] for file in self.files if not file[0].startswith("D")]
+
+  def _AddedFileList(self):
+    """Returns a list of files added in this change."""
+    return [file[1] for file in self.files if file[0].startswith("A")]
+
+  def Save(self):
+    """Writes the changelist information to disk."""
+    data = SEPARATOR.join([self.issue,
+                          "\n".join([f[0] + f[1] for f in self.files]),
+                          self.description])
+    WriteFile(GetChangelistInfoFile(self.name), data)
+
+  def Delete(self):
+    """Removes the changelist information from disk."""
+    os.remove(GetChangelistInfoFile(self.name))
+
+  def CloseIssue(self):
+    """Closes the Rietveld issue for this changelist."""
+    data = [("description", self.description),]
+    ctype, body = upload.EncodeMultipartFormData(data, [])
+    SendToRietveld("/" + self.issue + "/close", body, ctype)
+
+  def UpdateRietveldDescription(self):
+    """Sets the description for an issue on Rietveld."""
+    data = [("description", self.description),]
+    ctype, body = upload.EncodeMultipartFormData(data, [])
+    SendToRietveld("/" + self.issue + "/description", body, ctype)
+
+  def MissingTests(self):
+    """Returns True if the change looks like it needs unit tests but has none.
+
+    A change needs unit tests if it contains any new source files or methods.
+    """
+    SOURCE_SUFFIXES = [".cc", ".cpp", ".c", ".m", ".mm"]
+    # Ignore third_party entirely.
+    files = [file for file in self._NonDeletedFileList()
+             if file.find("third_party") == -1]
+    added_files = [file for file in self._AddedFileList()
+                   if file.find("third_party") == -1]
+
+    # If the change is entirely in third_party, we're done.
+    if len(files) == 0:
+      return False
+
+    # Any new or modified test files?
+    # A test file's name ends with "test.*" or "tests.*".
+    test_files = [test for test in files
+                  if os.path.splitext(test)[0].rstrip("s").endswith("test")]
+    if len(test_files) > 0:
+      return False
+
+    # Any new source files?
+    source_files = [file for file in added_files
+                    if os.path.splitext(file)[1] in SOURCE_SUFFIXES]
+    if len(source_files) > 0:
+      return True
+
+    # Do the long test, checking the files for new methods.
+    return self._HasNewMethod()
+
+  def _HasNewMethod(self):
+    """Returns True if the changeset contains any new functions, or if a
+    function signature has been changed.
+
+    A function is identified by starting flush left, containing a "(" before
+    the next flush-left line, and either ending with "{" before the next
+    flush-left line or being followed by an unindented "{".
+
+    Currently this returns True for new methods, new static functions, and
+    methods or functions whose signatures have been changed.
+
+    Inline methods added to header files won't be detected by this. That's
+    acceptable for purposes of determining if a unit test is needed, since
+    inline methods should be trivial.
+    """
+    # To check for methods added to source or header files, we need the diffs.
+    # We'll generate them all, since there aren't likely to be many files
+    # apart from source and headers; besides, we'll want them all if we're
+    # uploading anyway.
+    if self.patch is None:
+      self.patch = GenerateDiff(self.FileList())
+
+    definition = ""
+    for line in self.patch.splitlines():
+      if not line.startswith("+"):
+        continue
+      line = line.strip("+").rstrip(" \t")
+      # Skip empty lines, comments, and preprocessor directives.
+      # TODO(pamg): Handle multiline comments if it turns out to be a problem.
+      if line == "" or line.startswith("/") or line.startswith("#"):
+        continue
+
+      # A possible definition ending with "{" is complete, so check it.
+      if definition.endswith("{"):
+        if definition.find("(") != -1:
+          return True
+        definition = ""
+
+      # A { or an indented line, when we're in a definition, continues it.
+      if (definition != "" and
+          (line == "{" or line.startswith(" ") or line.startswith("\t"))):
+        definition += line
+
+      # A flush-left line starts a new possible function definition.
+      elif not line.startswith(" ") and not line.startswith("\t"):
+        definition = line
+
+    return False
+
+
+SEPARATOR = "\n-----\n"
+# The info files have the following format:
+# issue_id\n
+# SEPARATOR\n
+# filepath1\n
+# filepath2\n
+# .
+# .
+# filepathn\n
+# SEPARATOR\n
+# description
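+#
+# For example (hypothetical), a changelist "fix1" on issue 123 would be
+# stored as:
+#
+# 123
+# -----
+# M      chrome/browser/foo.cc
+# A      chrome/browser/bar.cc
+# -----
+# Make foo use bar.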
+
+
+def GetChangelistInfoFile(changename):
+  """Returns the file that stores information about a changelist."""
+  if not changename or re.search(r'[^\w-]', changename):
+    ErrorExit("Invalid changelist name: " + changename)
+  return os.path.join(GetInfoDir(), changename)
+
+
+def LoadChangelistInfoForMultiple(changenames, fail_on_not_found=True,
+                                  update_status=False):
+  """Loads many changes and merge their files list into one pseudo change.
+
+  This is mainly usefull to concatenate many changes into one for a 'gcl try'.
+  """
+  changes = changenames.split(',')
+  aggregate_change_info = ChangeInfo(name=changenames)
+  for change in changes:
+    aggregate_change_info.files += LoadChangelistInfo(change,
+                                                      fail_on_not_found,
+                                                      update_status).files
+  return aggregate_change_info
+
+
+def LoadChangelistInfo(changename, fail_on_not_found=True,
+                       update_status=False):
+  """Gets information about a changelist.
+
+  Args:
+    fail_on_not_found: if True, this function will quit the program if the
+      changelist doesn't exist.
+    update_status: if True, the svn status will be updated for all the files
+      and unchanged files will be removed.
+
+  Returns: a ChangeInfo object.
+  """
+  info_file = GetChangelistInfoFile(changename)
+  if not os.path.exists(info_file):
+    if fail_on_not_found:
+      ErrorExit("Changelist " + changename + " not found.")
+    return ChangeInfo(changename)
+  data = ReadFile(info_file)
+  split_data = data.split(SEPARATOR, 2)
+  if len(split_data) != 3:
+    os.remove(info_file)
+    ErrorExit("Changelist file %s was corrupt and deleted" % info_file)
+  issue = split_data[0]
+  files = []
+  for line in split_data[1].splitlines():
+    status = line[:7]
+    file = line[7:]
+    files.append((status, file))
+  description = split_data[2]
+  save = False
+  if update_status:
+    # Iterate over a copy since entries may be removed from |files| below.
+    for file in files[:]:
+      filename = os.path.join(GetRepositoryRoot(), file[1])
+      status = RunShell(["svn", "status", filename])[:7]
+      if not status:  # File has been reverted.
+        save = True
+        files.remove(file)
+      elif status != file[0]:
+        save = True
+        files[files.index(file)] = (status, file[1])
+  change_info = ChangeInfo(changename, issue, description, files)
+  if save:
+    change_info.Save()
+  return change_info
+
+
+def GetCLs():
+  """Returns a list of all the changelists in this repository."""
+  cls = os.listdir(GetInfoDir())
+  if CODEREVIEW_SETTINGS_FILE in cls:
+    cls.remove(CODEREVIEW_SETTINGS_FILE)
+  return cls
+
+
+def GenerateChangeName():
+  """Generate a random changelist name."""
+  random.seed()
+  current_cl_names = GetCLs()
+  while True:
+    cl_name = (random.choice(string.ascii_lowercase) +
+               random.choice(string.digits) +
+               random.choice(string.ascii_lowercase) +
+               random.choice(string.digits))
+    if cl_name not in current_cl_names:
+      return cl_name
+
+
+def GetModifiedFiles():
+  """Returns a set that maps from changelist name to (status,filename) tuples.
+
+  Files not in a changelist have an empty changelist name.  Filenames are in
+  relation to the top level directory of the current repository.  Note that
+  only the current directory and subdirectories are scanned, in order to
+  improve performance while still being flexible.
+  """
+  files = {}
+
+  # Since the files are normalized to the root folder of the repository,
+  # figure out what we need to add to the paths.
+  dir_prefix = os.getcwd()[len(GetRepositoryRoot()):].strip(os.sep)
+
+  # Get a list of all files in changelists.
+  files_in_cl = {}
+  for cl in GetCLs():
+    change_info = LoadChangelistInfo(cl)
+    for status, filename in change_info.files:
+      files_in_cl[filename] = change_info.name
+
+  # Get all the modified files.
+  status = RunShell(["svn", "status"])
+  for line in status.splitlines():
+    if not len(line) or line[0] == "?":
+      continue
+    status = line[:7]
+    filename = line[7:].strip()
+    if dir_prefix:
+      filename = os.path.join(dir_prefix, filename)
+    change_list_name = ""
+    if filename in files_in_cl:
+      change_list_name = files_in_cl[filename]
+    files.setdefault(change_list_name, []).append((status, filename))
+
+  return files
+
+
+def GetFilesNotInCL():
+  """Returns a list of tuples (status,filename) that aren't in any changelists.
+
+  See docstring of GetModifiedFiles for information about path of files and
+  which directories are scanned.
+  """
+  modified_files = GetModifiedFiles()
+  if "" not in modified_files:
+    return []
+  return modified_files[""]
+
+
+def SendToRietveld(request_path, payload=None,
+                   content_type="application/octet-stream", timeout=None):
+  """Send a POST/GET to Rietveld.  Returns the response body."""
+  def GetUserCredentials():
+    """Prompts the user for a username and password."""
+    email = upload.GetEmail()
+    password = getpass.getpass("Password for %s: " % email)
+    return email, password
+
+  server = GetCodeReviewSetting("CODE_REVIEW_SERVER")
+  rpc_server = upload.HttpRpcServer(server,
+                                    GetUserCredentials,
+                                    host_override=server,
+                                    save_cookies=True)
+  try:
+    return rpc_server.Send(request_path, payload, content_type, timeout)
+  except urllib2.URLError, e:
+    if timeout is None:
+      ErrorExit("Error accessing url %s" % request_path)
+    else:
+      return None
+
+
+def GetIssueDescription(issue):
+  """Returns the issue description from Rietveld."""
+  return SendToRietveld("/" + issue + "/description")
+
+
+def UnknownFiles(extra_args):
+  """Runs svn status and prints unknown files.
+
+  Any args in |extra_args| are passed to the tool to support giving alternate
+  code locations.
+  """
+  args = ["svn", "status"]
+  args += extra_args
+  p = subprocess.Popen(args, stdout=subprocess.PIPE,
+                       stderr=subprocess.STDOUT, shell=use_shell)
+  while True:
+    line = p.stdout.readline()
+    if not line:
+      break
+    if line[0] != '?':
+      continue  # Not an unknown file to svn.
+    # The lines look like this:
+    # "?      foo.txt"
+    # and we want just "foo.txt"
+    print line[7:].strip()
+  p.wait()
+  p.stdout.close()
+
+
+def Opened():
+  """Prints a list of modified files in the current directory down."""
+  files = GetModifiedFiles()
+  cl_keys = files.keys()
+  cl_keys.sort()
+  for cl_name in cl_keys:
+    if cl_name:
+      note = ""
+      if len(LoadChangelistInfo(cl_name).files) != len(files[cl_name]):
+        note = " (Note: this changelist contains files outside this directory)"
+      print "\n--- Changelist " + cl_name + note + ":"
+    for file in files[cl_name]:
+      print "".join(file)
+
+
+def Help(argv=None):
+  if argv and argv[0] == 'try':
+    TryChange(None, ['--help'], swallow_exception=False)
+    return
+
+  print (
+"""GCL is a wrapper for Subversion that simplifies working with groups of files.
+
+Basic commands:
+-----------------------------------------
+   gcl change change_name
+      Add/remove files to a changelist. Only scans the current directory and
+      subdirectories.
+
+   gcl upload change_name [-r reviewer1@gmail.com,reviewer2@gmail.com,...]
+                          [--send_mail] [--no_try] [--no_presubmit]
+      Uploads the changelist to the server for review.
+
+   gcl commit change_name [--no_presubmit] [--force]
+      Commits the changelist to the repository.
+
+   gcl lint change_name
+      Check all the files in the changelist for possible style violations.
+
+Advanced commands:
+-----------------------------------------
+   gcl delete change_name
+      Deletes a changelist.
+
+   gcl diff change_name
+      Diffs all files in the changelist.
+
+   gcl presubmit change_name
+      Runs presubmit checks without uploading the changelist.
+
+   gcl diff
+      Diffs all files in the current directory and subdirectories that aren't in
+      a changelist.
+
+   gcl changes
+      Lists all the changelists and the files in them.
+
+   gcl nothave [optional directory]
+      Lists files unknown to Subversion.
+
+   gcl opened
+      Lists modified files in the current directory and subdirectories.
+
+   gcl settings
+      Print the code review settings for this directory.
+
+   gcl status
+      Lists modified and unknown files in the current directory and
+      subdirectories.
+
+   gcl try change_name
+      Sends the change to the tryserver so a trybot can do a test run on your
+      code. To send multiple changes as one patch, use a comma-separated list
+      of changenames.
+      --> Use 'gcl help try' for more information!
+""")
+
+def GetEditor():
+  editor = os.environ.get("SVN_EDITOR")
+  if not editor:
+    editor = os.environ.get("EDITOR")
+
+  if not editor:
+    if sys.platform.startswith("win"):
+      editor = "notepad"
+    else:
+      editor = "vi"
+
+  return editor
+
+
+def GenerateDiff(files, root=None):
+  """Returns a string containing the diff for the given file list.
+
+  The files in the list should either be absolute paths or relative to the
+  given root. If no root directory is provided, the repository root will be
+  used.
+  """
+  previous_cwd = os.getcwd()
+  if root is None:
+    os.chdir(GetRepositoryRoot())
+  else:
+    os.chdir(root)
+
+  diff = []
+  for file in files:
+    # Use svn info output instead of os.path.isdir because the latter fails
+    # when the file is deleted.
+    if GetSVNFileInfo(file).get("Node Kind") == "directory":
+      continue
+    # If the user specified a custom diff command in their svn config file,
+    # then it'll be used when we do svn diff, which we don't want to happen
+    # since we want the unified diff.  Using --diff-cmd=diff doesn't always
+    # work, since they can have another diff executable in their path that
+    # gives different line endings.  So we use a bogus temp directory as the
+    # config directory, which gets around these problems.
+    if sys.platform.startswith("win"):
+      parent_dir = tempfile.gettempdir()
+    else:
+      parent_dir = sys.path[0]  # tempdir is not secure.
+    bogus_dir = os.path.join(parent_dir, "temp_svn_config")
+    if not os.path.exists(bogus_dir):
+      os.mkdir(bogus_dir)
+    output = RunShell(["svn", "diff", "--config-dir", bogus_dir, file])
+    if output:
+      diff.append(output)
+    # On Posix platforms, svn diff on a mv/cp'd file outputs nothing.
+    # We put in an empty Index entry so upload.py knows about them.
+    elif not sys.platform.startswith("win") and IsSVNMoved(file):
+      diff.append("\nIndex: %s\n" % file)
+  os.chdir(previous_cwd)
+  return "".join(diff)
+
+
+def UploadCL(change_info, args):
+  if not change_info.FileList():
+    print "Nothing to upload, changelist is empty."
+    return
+
+  if not "--no_presubmit" in args:
+    if not DoPresubmitChecks(change_info, committing=False):
+      return
+  else:
+    args.remove("--no_presubmit")
+
+  no_try = "--no_try" in args
+  if no_try:
+    args.remove("--no_try")
+  else:
+    # Support --no-try as --no_try
+    no_try = "--no-try" in args
+    if no_try:
+      args.remove("--no-try")
+
+  # Map --send-mail to --send_mail
+  if "--send-mail" in args:
+    args.remove("--send-mail")
+    args.append("--send_mail")
+
+  # Supports --clobber for the try server.
+  clobber = False
+  if "--clobber" in args:
+    args.remove("--clobber")
+    clobber = True
+
+  # TODO(pamg): Do something when tests are missing. The plan is to upload a
+  # message to Rietveld and have it shown in the UI attached to this patch.
+
+  upload_arg = ["upload.py", "-y"]
+  upload_arg.append("--server=" + GetCodeReviewSetting("CODE_REVIEW_SERVER"))
+  upload_arg.extend(args)
+
+  desc_file = ""
+  if change_info.issue:  # Uploading a new patchset.
+    found_message = False
+    for arg in args:
+      if arg.startswith("--message") or arg.startswith("-m"):
+        found_message = True
+        break
+
+    if not found_message:
+      upload_arg.append("--message=''")
+
+    upload_arg.append("--issue=" + change_info.issue)
+  else: # First time we upload.
+    handle, desc_file = tempfile.mkstemp(text=True)
+    os.write(handle, change_info.description)
+    os.close(handle)
+
+    cc_list = GetCodeReviewSetting("CC_LIST")
+    if cc_list:
+      upload_arg.append("--cc=" + cc_list)
+    upload_arg.append("--description_file=" + desc_file + "")
+    if change_info.description:
+      subject = change_info.description[:77]
+      if subject.find("\r\n") != -1:
+        subject = subject[:subject.find("\r\n")]
+      if subject.find("\n") != -1:
+        subject = subject[:subject.find("\n")]
+      if len(change_info.description) > 77:
+        subject = subject + "..."
+      upload_arg.append("--message=" + subject)
+
+  # Change the current working directory before calling upload.py so that it
+  # shows the correct base.
+  previous_cwd = os.getcwd()
+  os.chdir(GetRepositoryRoot())
+
+  # If we have a lot of files with long paths, then we won't be able to fit
+  # the command to "svn diff".  Instead, we generate the diff manually for
+  # each file and concatenate them before passing it to upload.py.
+  if change_info.patch is None:
+    change_info.patch = GenerateDiff(change_info.FileList())
+  issue, patchset = upload.RealMain(upload_arg, change_info.patch)
+  if issue and issue != change_info.issue:
+    change_info.issue = issue
+    change_info.Save()
+
+  if desc_file:
+    os.remove(desc_file)
+
+  # Do background work on Rietveld to lint the file so that the results are
+  # ready when the issue is viewed.
+  SendToRietveld("/lint/issue%s_%s" % (issue, patchset), timeout=0.5)
+
+  # Once uploaded to Rietveld, send it to the try server.
+  if not no_try:
+    try_on_upload = GetCodeReviewSetting('TRY_ON_UPLOAD')
+    if try_on_upload and try_on_upload.lower() == 'true':
+      # Use the local diff.
+      args = [
+        "--issue", change_info.issue,
+        "--patchset", patchset,
+      ]
+      if clobber:
+        args.append('--clobber')
+      TryChange(change_info, args, swallow_exception=True)
+
+  os.chdir(previous_cwd)
+
+
+def PresubmitCL(change_info):
+  """Reports what presubmit checks on the change would report."""
+  if not change_info.FileList():
+    print "Nothing to presubmit check, changelist is empty."
+    return
+
+  print "*** Presubmit checks for UPLOAD would report: ***"
+  DoPresubmitChecks(change_info, committing=False)
+
+  print "\n\n*** Presubmit checks for COMMIT would report: ***"
+  DoPresubmitChecks(change_info, committing=True)
+
+
+def TryChange(change_info, args, swallow_exception):
+  """Create a diff file of change_info and send it to the try server."""
+  try:
+    import trychange
+  except ImportError:
+    if swallow_exception:
+      return
+    ErrorExit("You need to install trychange.py to use the try server.")
+
+  if change_info:
+    trychange_args = ['--name', change_info.name]
+    trychange_args.extend(args)
+    trychange.TryChange(trychange_args,
+                        file_list=change_info.FileList(),
+                        swallow_exception=swallow_exception,
+                        prog='gcl try')
+  else:
+    trychange.TryChange(args,
+                        file_list=None,
+                        swallow_exception=swallow_exception,
+                        prog='gcl try')
+
+
+def Commit(change_info, args):
+  if not change_info.FileList():
+    print "Nothing to commit, changelist is empty."
+    return
+
+  if not "--no_presubmit" in args:
+    if not DoPresubmitChecks(change_info, committing=True):
+      return
+  else:
+    args.remove("--no_presubmit")
+
+  no_tree_status_check = ("--force" in args or "-f" in args)
+  if not no_tree_status_check and not IsTreeOpen():
+    print ("Error: The tree is closed. Try again later or use --force to force"
+           " the commit. May the --force be with you.")
+    return
+
+  commit_cmd = ["svn", "commit"]
+  filename = ''
+  if change_info.issue:
+    # Get the latest description from Rietveld.
+    change_info.description = GetIssueDescription(change_info.issue)
+
+  commit_message = change_info.description.replace('\r\n', '\n')
+  if change_info.issue:
+    commit_message += ('\nReview URL: http://%s/%s' %
+                       (GetCodeReviewSetting("CODE_REVIEW_SERVER"),
+                        change_info.issue))
+
+  handle, commit_filename = tempfile.mkstemp(text=True)
+  os.write(handle, commit_message)
+  os.close(handle)
+
+  handle, targets_filename = tempfile.mkstemp(text=True)
+  os.write(handle, "\n".join(change_info.FileList()))
+  os.close(handle)
+
+  commit_cmd += ['--file=' + commit_filename]
+  commit_cmd += ['--targets=' + targets_filename]
+  # Change the current working directory before calling commit.
+  previous_cwd = os.getcwd()
+  os.chdir(GetRepositoryRoot())
+  output = RunShell(commit_cmd, True)
+  os.remove(commit_filename)
+  os.remove(targets_filename)
+  if output.find("Committed revision") != -1:
+    change_info.Delete()
+
+    if change_info.issue:
+      revision = re.compile(".*?\nCommitted revision (\d+)",
+                            re.DOTALL).match(output).group(1)
+      viewvc_url = GetCodeReviewSetting("VIEW_VC")
+      change_info.description = change_info.description + '\n'
+      if viewvc_url:
+        change_info.description += "\nCommitted: " + viewvc_url + revision
+      change_info.CloseIssue()
+  os.chdir(previous_cwd)
+
+
+def Change(change_info):
+  """Creates/edits a changelist."""
+  if change_info.issue:
+    try:
+      description = GetIssueDescription(change_info.issue)
+    except urllib2.HTTPError, err:
+      if err.code == 404:
+        # The user deleted the issue in Rietveld, so forget the old issue id.
+        description = change_info.description
+        change_info.issue = ""
+        change_info.Save()
+      else:
+        ErrorExit("Error getting the description from Rietveld: " + err)
+  else:
+    description = change_info.description
+
+  other_files = GetFilesNotInCL()
+
+  separator1 = ("\n---All lines above this line become the description.\n"
+                "---Repository Root: " + GetRepositoryRoot() + "\n"
+                "---Paths in this changelist (" + change_info.name + "):\n")
+  separator2 = "\n\n---Paths modified but not in any changelist:\n\n"
+  text = (description + separator1 + '\n' +
+          '\n'.join([f[0] + f[1] for f in change_info.files]) + separator2 +
+          '\n'.join([f[0] + f[1] for f in other_files]) + '\n')
+
+  handle, filename = tempfile.mkstemp(text=True)
+  os.write(handle, text)
+  os.close(handle)
+
+  os.system(GetEditor() + " " + filename)
+
+  result = ReadFile(filename)
+  os.remove(filename)
+
+  if not result:
+    return
+
+  split_result = result.split(separator1, 1)
+  if len(split_result) != 2:
+    ErrorExit("Don't modify the text starting with ---!\n\n" + result)
+
+  new_description = split_result[0]
+  cl_files_text = split_result[1]
+  if new_description != description:
+    change_info.description = new_description
+    if change_info.issue:
+      # Update the Rietveld issue with the new description.
+      change_info.UpdateRietveldDescription()
+
+  new_cl_files = []
+  for line in cl_files_text.splitlines():
+    if not len(line):
+      continue
+    if line.startswith("---"):
+      break
+    status = line[:7]
+    file = line[7:]
+    new_cl_files.append((status, file))
+  change_info.files = new_cl_files
+
+  change_info.Save()
+  print change_info.name + " changelist saved."
+  if change_info.MissingTests():
+    Warn("WARNING: " + MISSING_TEST_MSG)
+
+# We don't lint files in these path prefixes.
+IGNORE_PATHS = ("webkit",)
+
+# Valid extensions for files we want to lint.
+CPP_EXTENSIONS = ("cpp", "cc", "h")
+
+def Lint(change_info, args):
+  """Runs cpplint.py on all the files in |change_info|"""
+  try:
+    import cpplint
+  except ImportError:
+    ErrorExit("You need to install cpplint.py to lint C++ files.")
+
+  # Change the current working directory before calling lint so that it
+  # shows the correct base.
+  previous_cwd = os.getcwd()
+  os.chdir(GetRepositoryRoot())
+
+  # Process cpplint's arguments, if any.
+  filenames = cpplint.ParseArguments(args + change_info.FileList())
+
+  for filename in filenames:
+    if any(filename.endswith(suffix) for suffix in CPP_EXTENSIONS):
+      if any(filename.startswith(prefix) for prefix in IGNORE_PATHS):
+        print "Ignoring non-Google styled file %s" % filename
+      else:
+        cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level)
+
+  print "Total errors found: %d\n" % cpplint._cpplint_state.error_count
+  os.chdir(previous_cwd)
+
+
+def DoPresubmitChecks(change_info, committing):
+  """Imports presubmit, then calls presubmit.DoPresubmitChecks."""
+  # Need to import here to avoid circular dependency.
+  import presubmit
+  result = presubmit.DoPresubmitChecks(change_info,
+                                       committing,
+                                       verbose=False,
+                                       output_stream=sys.stdout,
+                                       input_stream=sys.stdin,
+                                       default_presubmit=GetCachedFile(
+                                           'PRESUBMIT.py', use_root=True))
+  if not result:
+    print "\nPresubmit errors, can't continue (use --no_presubmit to bypass)"
+  return result
+
+
+def Changes():
+  """Print all the changelists and their files."""
+  for cl in GetCLs():
+    change_info = LoadChangelistInfo(cl, True, True)
+    print "\n--- Changelist " + change_info.name + ":"
+    for file in change_info.files:
+      print "".join(file)
+
+
+def main(argv=None):
+  if argv is None:
+    argv = sys.argv
+
+  if len(argv) == 1:
+    Help()
+    return 0
+
+  # Create the directory where we store information about changelists if it
+  # doesn't exist.
+  if not os.path.exists(GetInfoDir()):
+    os.mkdir(GetInfoDir())
+
+  # Commands that don't require an argument.
+  command = argv[1]
+  if command == "opened":
+    Opened()
+    return 0
+  if command == "status":
+    Opened()
+    print "\n--- Not in any changelist:"
+    UnknownFiles([])
+    return 0
+  if command == "nothave":
+    UnknownFiles(argv[2:])
+    return 0
+  if command == "changes":
+    Changes()
+    return 0
+  if command == "help":
+    Help(argv[2:])
+    return 0
+  if command == "diff" and len(argv) == 2:
+    files = GetFilesNotInCL()
+    print GenerateDiff([x[1] for x in files])
+    return 0
+  if command == "settings":
+    ignore = GetCodeReviewSetting("UNKNOWN");
+    print CODEREVIEW_SETTINGS
+    return 0
+
+  if len(argv) == 2:
+    if command == "change":
+      # Generate a random changelist name.
+      changename = GenerateChangeName()
+    else:
+      ErrorExit("Need a changelist name.")
+  else:
+    changename = argv[2]
+
+  # When the command is 'try' and --patchset is used, the patch to try
+  # is on the Rietveld server. 'change' creates a change so it's fine if the
+  # change didn't exist. All other commands require an existing change.
+  fail_on_not_found = command != "try" and command != "change"
+  if command == "try" and changename.find(',') != -1:
+    change_info = LoadChangelistInfoForMultiple(changename, True, True)
+  else:
+    change_info = LoadChangelistInfo(changename, fail_on_not_found, True)
+
+  if command == "change":
+    Change(change_info)
+  elif command == "lint":
+    Lint(change_info, argv[3:])
+  elif command == "upload":
+    UploadCL(change_info, argv[3:])
+  elif command == "presubmit":
+    PresubmitCL(change_info)
+  elif command in ("commit", "submit"):
+    Commit(change_info, argv[3:])
+  elif command == "delete":
+    change_info.Delete()
+  elif command == "try":
+    # When the change contains no files, send the "changename" positional
+    # argument to trychange.py.
+    if change_info.files:
+      args = argv[3:]
+    else:
+      change_info = None
+      args = argv[2:]
+    TryChange(change_info, args, swallow_exception=False)
+  else:
+    # Everything else that is passed into gcl we redirect to svn, after adding
+    # the files. This allows commands such as 'gcl diff xxx' to work.
+    args =["svn", command]
+    root = GetRepositoryRoot()
+    args.extend([os.path.join(root, x) for x in change_info.FileList()])
+    RunShell(args, True)
+  return 0
+
+
+if __name__ == "__main__":
+  sys.exit(main())

+ 21 - 0
gclient

@@ -0,0 +1,21 @@
+#!/bin/sh
+# Copyright (c) 2009 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This script will try to sync the bootstrap directories and then defer control.
+
+base_dir=$(dirname "$0")
+
+if [ "X$DEPOT_TOOLS_UPDATE" != "X0" -a -e "$base_dir/.svn" ]
+then
+  # Update the bootstrap directory to stay up-to-date with the latest
+  # depot_tools.
+  svn -q up "$base_dir/bootstrap"
+
+  # Then defer the control to the bootstrapper.
+  "$base_dir/bootstrap/gclient.sh"
+else
+  exec "$base_dir/gclient.py" "$@"
+fi
+

+ 36 - 0
gclient.bat

@@ -0,0 +1,36 @@
+@echo off
+:: Copyright (c) 2009 The Chromium Authors. All rights reserved.
+:: Use of this source code is governed by a BSD-style license that can be
+:: found in the LICENSE file.
+
+:: This file is a stub to sync .\bootstrap first and defer control to
+:: .\bootstrap\gclient.bat, which will sync back '.'. If auto-update is
+:: disabled, gclient.py is called directly instead.
+
+:: Should the automatic update be skipped?
+IF "%DEPOT_TOOLS_UPDATE%" == "0" GOTO :SKIP_UPDATE
+:: We can't sync if .\.svn\. doesn't exist.
+IF NOT EXIST "%~dp0.svn" GOTO :SKIP_UPDATE
+
+:: Will download svn and python if not already installed on the system.
+call "%~dp0bootstrap\win\win_tools.bat"
+if errorlevel 1 goto :EOF
+
+:: Sync the bootstrap directory *only after* the tools have been installed.
+call svn up -q "%~dp0bootstrap"
+:: Still continue even in case of error.
+goto :UPDATE
+
+
+:SKIP_UPDATE
+:: Don't bother trying to update anything.
+python "%~dp0gclient.py" %*
+goto :EOF
+
+
+:UPDATE
+:: Transfer control to ease the update process. The following lines won't be
+:: executed so don't add any! Specifically, don't use 'call' in the following
+:: line.
+"%~dp0bootstrap\gclient.bat" %*
+goto :EOF

+ 1649 - 0
gclient.py

@@ -0,0 +1,1649 @@
+#!/usr/bin/python
+#
+# Copyright 2008 Google Inc.  All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A wrapper script to manage a set of client modules in different SCM.
+
+This script is intended to be used to help basic management of client
+program sources residing in one or more Subversion modules, along with
+other modules it depends on, also in Subversion, but possibly on
+multiple respositories, making a wrapper system apparently necessary.
+
+Files
+  .gclient      : Current client configuration, written by 'config' command.
+                  Format is a Python script defining 'solutions', a list whose
+                  entries each are maps binding the strings "name" and "url"
+                  to strings specifying the name and location of the client
+                  module, as well as "custom_deps" to a map similar to the DEPS
+                  file below.
+  .gclient_entries : A cache constructed by 'update' command.  Format is a
+                  Python script defining 'entries', a list of the names
+                  of all modules in the client
+  <module>/DEPS : Python script defining var 'deps' as a map from each requisite
+                  submodule name to a URL where it can be found (via one SCM)
+
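+  Example .gclient 'solutions' value (an illustrative sketch; the 'config'
+  command writes a commented template, see DEFAULT_CLIENT_FILE_TEXT below):
+    solutions = [
+      { "name"        : "src",
+        "url"         : "http://src.chromium.org/svn/trunk/src",
+        "custom_deps" : {},
+      },
+    ]
+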
+Hooks
+  .gclient and DEPS files may optionally contain a list named "hooks" to
+  allow custom actions to be performed based on files that have changed in the
+  working copy as a result of a "sync"/"update" or "revert" operation.  Hooks
+  can also be run based on what files have been modified in the working copy
+  with the "runhooks" operation.  If any of these operation are run with
+  --force, all known hooks will run regardless of the state of the working
+  copy.
+
+  Each item in a "hooks" list is a dict, containing these two keys:
+    "pattern"  The associated value is a string containing a regular
+               expression.  When a file whose pathname matches the expression
+               is checked out, updated, or reverted, the hook's "action" will
+               run.
+    "action"   A list describing a command to run along with its arguments, if
+               any.  An action command will run at most one time per gclient
+               invocation, regardless of how many files matched the pattern.
+               The action is executed in the same directory as the .gclient
+               file.  If the first item in the list is the string "python",
+               the current Python interpreter (sys.executable) will be used
+               to run the command.
+
+  Example:
+    hooks = [
+      { "pattern": "\\.(gif|jpe?g|pr0n|png)$",
+        "action":  ["python", "image_indexer.py", "--all"]},
+    ]
+"""
+
+__author__ = "darinf@gmail.com (Darin Fisher)"
+__version__ = "0.3.1"
+
+import errno
+import optparse
+import os
+import re
+import stat
+import subprocess
+import sys
+import time
+import urlparse
+import xml.dom.minidom
+import urllib
+
+def getText(nodelist):
+  """
+  Return the concatenated text for the children of a list of DOM nodes.
+  """
+  rc = []
+  for node in nodelist:
+    if node.nodeType == node.TEXT_NODE:
+      rc.append(node.data)
+    else:
+      rc.append(getText(node.childNodes))
+  return ''.join(rc)
+
+
+SVN_COMMAND = "svn"
+
+
+# default help text
+DEFAULT_USAGE_TEXT = (
+"""usage: %prog <subcommand> [options] [--] [svn options/args...]
+a wrapper for managing a set of client modules in svn.
+Version """ + __version__ + """
+
+subcommands:
+   cleanup
+   config
+   diff
+   revert
+   status
+   sync
+   update
+   runhooks
+   revinfo
+
+Options and extra arguments can be passed to invoked svn commands by
+appending them to the command line.  Note that if the first such
+appended option starts with a dash (-) then the options must be
+preceded by -- to distinguish them from gclient options.
+
+For additional help on a subcommand or examples of usage, try
+   %prog help <subcommand>
+   %prog help files
+""")
+
+GENERIC_UPDATE_USAGE_TEXT = (
+    """Perform a checkout/update of the modules specified by the gclient
+configuration; see 'help config'.  Unless --revision is specified,
+then the latest revision of the root solutions is checked out, with
+dependent submodule versions updated according to DEPS files.
+If --revision is specified, then the given revision is used in place
+of the latest, either for a single solution or for all solutions.
+Unless the --force option is provided, solutions and modules whose
+local revision matches the one to update (i.e., they have not changed
+in the repository) are *not* modified.
+This is a synonym for 'gclient %(alias)s'.
+
+usage: gclient %(cmd)s [options] [--] [svn update options/args]
+
+Valid options:
+  --force             : force update even for unchanged modules
+  --revision REV      : update/checkout all solutions with specified revision
+  --revision SOLUTION@REV : update given solution to specified revision
+  --deps PLATFORM(S)  : sync deps for the given platform(s), or 'all'
+  --verbose           : output additional diagnostics
+
+Examples:
+  gclient %(cmd)s
+      update files from SVN according to current configuration,
+      *for modules which have changed since last update or sync*
+  gclient %(cmd)s --force
+      update files from SVN according to current configuration, for
+      all modules (useful for recovering files deleted from local copy)
+""")
+
+COMMAND_USAGE_TEXT = {
+    "cleanup":
+    """Clean up all working copies, using 'svn cleanup' for each module.
+Additional options and args may be passed to 'svn cleanup'.
+
+usage: cleanup [options] [--] [svn cleanup args/options]
+
+Valid options:
+  --verbose           : output additional diagnostics
+""",
+    "config": """Create a .gclient file in the current directory; this
+specifies the configuration for further commands.  After update/sync,
+top-level DEPS files in each module are read to determine dependent
+modules to operate on as well.  If optional [url] parameter is
+provided, then configuration is read from a specified Subversion server
+URL.  Otherwise, a --spec option must be provided.
+
+usage: config [option | url] [safesync url]
+
+Valid options:
+  --spec=GCLIENT_SPEC   : contents of .gclient are read from string parameter.
+                          *Note that due to Cygwin/Python brokenness, it
+                          probably can't contain any newlines.*
+
+Examples:
+  gclient config https://gclient.googlecode.com/svn/trunk/gclient
+      configure a new client to check out gclient.py tool sources
+  gclient config --spec='solutions=[{"name":"gclient","""
+    '"url":"https://gclient.googlecode.com/svn/trunk/gclient",'
+    '"custom_deps":{}}]',
+    "diff": """Display the differences between two revisions of modules.
+(Does 'svn diff' for each checked out module and dependences.)
+Additional args and options to 'svn diff' can be passed after
+gclient options.
+
+usage: diff [options] [--] [svn args/options]
+
+Valid options:
+  --verbose            : output additional diagnostics
+
+Examples:
+  gclient diff
+      simple 'svn diff' for configured client and dependences
+  gclient diff -- -x -b
+      use 'svn diff -x -b' to suppress whitespace-only differences
+  gclient diff -- -r HEAD -x -b
+      diff versus the latest version of each module
+""",
+    "revert":
+    """Revert every file in every managed directory in the client view.
+
+usage: revert
+""",
+    "status":
+    """Show the status of client and dependent modules, using 'svn diff'
+for each module.  Additional options and args may be passed to 'svn diff'.
+
+usage: status [options] [--] [svn diff args/options]
+
+Valid options:
+  --verbose           : output additional diagnostics
+""",
+    "sync": GENERIC_UPDATE_USAGE_TEXT % {"cmd": "sync", "alias": "update"},
+    "update": GENERIC_UPDATE_USAGE_TEXT % {"cmd": "update", "alias": "sync"},
+    "help": """Describe the usage of this program or its subcommands.
+
+usage: help [options] [subcommand]
+
+Valid options:
+  --verbose           : output additional diagnostics
+""",
+    "runhooks":
+    """Runs hooks for files that have been modified in the local working copy,
+according to 'svn status'.
+
+usage: runhooks [options]
+
+Valid options:
+  --force             : runs all known hooks, regardless of the working
+                        copy status
+  --verbose           : output additional diagnostics
+""",
+    "revinfo":
+    """Outputs source path, server URL and revision information for every
+dependency in all solutions (no local checkout required).
+
+usage: revinfo [options]
+""",
+}
+
+# parameterized by (solution_name, solution_url, safesync_url)
+DEFAULT_CLIENT_FILE_TEXT = (
+    """
+# An element of this array (a \"solution\") describes a repository directory
+# that will be checked out into your working copy.  Each solution may
+# optionally define additional dependencies (via its DEPS file) to be
+# checked out alongside the solution's directory.  A solution may also
+# specify custom dependencies (via the \"custom_deps\" property) that
+# override or augment the dependencies specified by the DEPS file.
+# If a \"safesync_url\" is specified, it is assumed to reference the location of
+# a text file which contains nothing but the last known good SCM revision to
+# sync against. It is fetched if specified and used unless --head is passed.
+solutions = [
+  { \"name\"        : \"%s\",
+    \"url\"         : \"%s\",
+    \"custom_deps\" : {
+      # To use the trunk of a component instead of what's in DEPS:
+      #\"component\": \"https://svnserver/component/trunk/\",
+      # To exclude a component from your working copy:
+      #\"data/really_large_component\": None,
+    },
+    \"safesync_url\": \"%s\"
+  }
+]
+""")
+
+
+## Generic utils
+
+
+class Error(Exception):
+  """gclient exception class."""
+  pass
+
+class PrintableObject(object):
+  def __str__(self):
+    output = ''
+    for i in dir(self):
+      if i.startswith('__'):
+        continue
+      output += '%s = %s\n' % (i, str(getattr(self, i, '')))
+    return output
+
+
+def FileRead(filename):
+  content = None
+  f = open(filename, "rU")
+  try:
+    content = f.read()
+  finally:
+    f.close()
+  return content
+
+
+def FileWrite(filename, content):
+  f = open(filename, "w")
+  try:
+    f.write(content)
+  finally:
+    f.close()
+
+
+def RemoveDirectory(*path):
+  """Recursively removes a directory, even if it's marked read-only.
+
+  Remove the directory located at *path, if it exists.
+
+  shutil.rmtree() doesn't work on Windows if any of the files or directories
+  are read-only, which svn repositories and some .svn files are.  We need to
+  be able to force the files to be writable (i.e., deletable) as we traverse
+  the tree.
+
+  Even with all this, Windows still sometimes fails to delete a file, citing
+  a permission error (maybe something to do with antivirus scans or disk
+  indexing).  The best suggestion any of the user forums had was to wait a
+  bit and try again, so we do that too.  It's hand-waving, but sometimes it
+  works. :/
+
+  On POSIX systems, things are a little bit simpler.  The modes of the files
+  to be deleted don't matter, only the modes of the directories containing
+  them are significant.  As the directory tree is traversed, each directory
+  has its mode set appropriately before descending into it.  This should
+  result in the entire tree being removed, with the possible exception of
+  *path itself, because nothing attempts to change the mode of its parent.
+  Doing so would be hazardous, as it's not a directory slated for removal.
+  In the ordinary case, this is not a problem: for our purposes, the user
+  will never lack write permission on *path's parent.
+  """
+  file_path = os.path.join(*path)
+  if not os.path.exists(file_path):
+    return
+
+  if os.path.islink(file_path) or not os.path.isdir(file_path):
+    raise Error("RemoveDirectory asked to remove non-directory %s" % file_path)
+
+  has_win32api = False
+  if sys.platform == 'win32':
+    has_win32api = True
+    # Some people don't have the APIs installed. In that case we'll do without.
+    try:
+      win32api = __import__('win32api')
+      win32con = __import__('win32con')
+    except ImportError:
+      has_win32api = False
+  else:
+    # On POSIX systems, we need the x-bit set on the directory to access it,
+    # the r-bit to see its contents, and the w-bit to remove files from it.
+    # The actual modes of the files within the directory are irrelevant.
+    os.chmod(file_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
+  for fn in os.listdir(file_path):
+    fullpath = os.path.join(file_path, fn)
+
+    # If fullpath is a symbolic link that points to a directory, isdir will
+    # be True, but we don't want to descend into that as a directory, we just
+    # want to remove the link.  Check islink and treat links as ordinary files
+    # would be treated regardless of what they reference.
+    if os.path.islink(fullpath) or not os.path.isdir(fullpath):
+      if sys.platform == 'win32':
+        os.chmod(fullpath, stat.S_IWRITE)
+        if has_win32api:
+          win32api.SetFileAttributes(fullpath, win32con.FILE_ATTRIBUTE_NORMAL)
+      try:
+        os.remove(fullpath)
+      except OSError, e:
+        if e.errno != errno.EACCES or sys.platform != 'win32':
+          raise
+        print 'Failed to delete %s: trying again' % fullpath
+        time.sleep(0.1)
+        os.remove(fullpath)
+    else:
+      RemoveDirectory(fullpath)
+
+  if sys.platform == 'win32':
+    os.chmod(file_path, stat.S_IWRITE)
+    if has_win32api:
+      win32api.SetFileAttributes(file_path, win32con.FILE_ATTRIBUTE_NORMAL)
+  try:
+    os.rmdir(file_path)
+  except OSError, e:
+    if e.errno != errno.EACCES or sys.platform != 'win32':
+      raise
+    print 'Failed to remove %s: trying again' % file_path
+    time.sleep(0.1)
+    os.rmdir(file_path)
+
+
+def SubprocessCall(command, in_directory, out, fail_status=None):
+  """Runs command, a list, in directory in_directory.
+
+  This function wraps SubprocessCallAndCapture, but does not perform the
+  capturing functions.  See that function for a more complete usage
+  description.
+  """
+  # Call subprocess and capture nothing:
+  SubprocessCallAndCapture(command, in_directory, out, fail_status)
+
+
+def SubprocessCallAndCapture(command, in_directory, out, fail_status=None,
+                             pattern=None, capture_list=None):
+  """Runs command, a list, in directory in_directory.
+
+  A message indicating what is being done, as well as the command's stdout,
+  is printed to out.
+
+  If a pattern is specified, any line in the output matching pattern will have
+  its first match group appended to capture_list.
+
+  If the command fails, as indicated by a nonzero exit status, gclient will
+  exit with an exit status of fail_status.  If fail_status is None (the
+  default), gclient will raise an Error exception.
+  """
+
+  print >> out, ("\n________ running \'%s\' in \'%s\'"
+      % (' '.join(command), in_directory))
+
+  # *Sigh*:  Windows needs shell=True, or else it won't search %PATH% for the
+  # executable, but shell=True makes subprocess on Linux fail when it's called
+  # with a list because it only tries to execute the first item in the list.
+  kid = subprocess.Popen(command, bufsize=0, cwd=in_directory,
+      shell=(sys.platform == 'win32'), stdout=subprocess.PIPE)
+
+  if pattern:
+    compiled_pattern = re.compile(pattern)
+
+  # Also, we need to forward stdout to prevent weird re-ordering of output.
+  # This has to be done on a per byte basis to make sure it is not buffered:
+  # normally buffering is done for each line, but if svn requests input, no
+  # end-of-line character is output after the prompt and it would not show up.
+  in_byte = kid.stdout.read(1)
+  in_line = ""
+  while in_byte:
+    if in_byte != "\r":
+      out.write(in_byte)
+      in_line += in_byte
+    if in_byte == "\n" and pattern:
+      match = compiled_pattern.search(in_line[:-1])
+      if match:
+        capture_list.append(match.group(1))
+      in_line = ""
+    in_byte = kid.stdout.read(1)
+  rv = kid.wait()
+
+  if rv:
+    msg = "failed to run command: %s" % " ".join(command)
+
+    if fail_status is not None:
+      print >>sys.stderr, msg
+      sys.exit(fail_status)
+
+    raise Error(msg)
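+
+# A hedged usage sketch for SubprocessCallAndCapture: collect the revision
+# number that "svn update" prints (assuming its usual "At revision N." line):
+#
+#   revisions = []
+#   SubprocessCallAndCapture(["svn", "update", "."], ".", sys.stdout,
+#                            pattern=r"^At revision (\d+)\.$",
+#                            capture_list=revisions)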
+
+
+def IsUsingGit(root, paths):
+  """Returns True if we're using git to manage any of our checkouts.
+  |paths| is a list of paths to check."""
+  for path in paths:
+    if os.path.exists(os.path.join(root, path, '.git')):
+      return True
+  return False
+
+# -----------------------------------------------------------------------------
+# SVN utils:
+
+
+def RunSVN(options, args, in_directory):
+  """Runs svn, sending output to stdout.
+
+  Args:
+    args: A sequence of command line parameters to be passed to svn.
+    in_directory: The directory where svn is to be run.
+
+  Raises:
+    Error: An error occurred while running the svn command.
+  """
+  c = [SVN_COMMAND]
+  c.extend(args)
+
+  SubprocessCall(c, in_directory, options.stdout)
+
+
+def CaptureSVN(options, args, in_directory):
+  """Runs svn, capturing output sent to stdout as a string.
+
+  Args:
+    args: A sequence of command line parameters to be passed to svn.
+    in_directory: The directory where svn is to be run.
+
+  Returns:
+    The output sent to stdout as a string.
+  """
+  c = [SVN_COMMAND]
+  c.extend(args)
+
+  # *Sigh*:  Windows needs shell=True, or else it won't search %PATH% for
+  # the svn.exe executable, but shell=True makes subprocess on Linux fail
+  # when it's called with a list because it only tries to execute the
+  # first string ("svn").
+  return subprocess.Popen(c, cwd=in_directory, shell=(sys.platform == 'win32'),
+                          stdout=subprocess.PIPE).communicate()[0]
+
+
+def RunSVNAndGetFileList(options, args, in_directory, file_list):
+  """Runs svn checkout, update, or status, output to stdout.
+
+  The first item in args must be either "checkout", "update", or "status".
+
+  svn's stdout is parsed to collect a list of files checked out or updated.
+  These files are appended to file_list.  svn's stdout is also printed to
+  sys.stdout as in RunSVN.
+
+  Args:
+    args: A sequence of command line parameters to be passed to svn.
+    in_directory: The directory where svn is to be run.
+
+  Raises:
+    Error: An error occurred while running the svn command.
+  """
+  command = [SVN_COMMAND]
+  command.extend(args)
+
+  # svn update and svn checkout use the same pattern: the first three columns
+  # are for file status, property status, and lock status.  This is followed
+  # by two spaces, and then the path to the file.
+  update_pattern = '^...  (.*)$'
+
+  # The first three columns of svn status are the same as for svn update and
+  # svn checkout.  The next three columns indicate addition-with-history,
+  # switch, and remote lock status.  This is followed by one space, and then
+  # the path to the file.
+  status_pattern = '^...... (.*)$'
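+  # For example (illustrative), the update line "A    src/foo.cc" and the
+  # status line "M      src/foo.cc" would both capture "src/foo.cc".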
+
+  # args[0] must be a supported command.  This will blow up if it's something
+  # else, which is good.  Note that the patterns are only effective when
+  # these commands are used in their ordinary forms; the patterns are invalid
+  # for "svn status --show-updates", for example.
+  pattern = {
+        'checkout': update_pattern,
+        'status':   status_pattern,
+        'update':   update_pattern,
+      }[args[0]]
+
+  SubprocessCallAndCapture(command, in_directory, options.stdout,
+                           pattern=pattern, capture_list=file_list)
+
+
+def CaptureSVNInfo(options, relpath, in_directory):
+  """Runs 'svn info' on an existing path.
+
+  Args:
+    relpath: The directory where the working copy resides relative to
+      the directory given by in_directory.
+    in_directory: The directory where svn is to be run.
+
+  Returns:
+    An object with fields corresponding to the output of 'svn info'
+  """
+  info = CaptureSVN(options, ["info", "--xml", relpath], in_directory)
+  dom = xml.dom.minidom.parseString(info)
+
+  # str() the getText() results because they may be returned as
+  # Unicode, which interferes with the higher layers matching up
+  # things in the deps dictionary.
+  result = PrintableObject()
+  result.root = str(getText(dom.getElementsByTagName('root')))
+  result.url = str(getText(dom.getElementsByTagName('url')))
+  result.uuid = str(getText(dom.getElementsByTagName('uuid')))
+  result.revision = int(dom.getElementsByTagName('entry')[0].getAttribute(
+                            'revision'))
+  return result
+
+
+def CaptureSVNHeadRevision(options, url):
+  """Get the head revision of a SVN repository.
+
+  Returns:
+    Int head revision
+  """
+  info = CaptureSVN(options, ["info", "--xml", url], os.getcwd())
+  dom = xml.dom.minidom.parseString(info)
+  return int(dom.getElementsByTagName('entry')[0].getAttribute('revision'))
+
+
+class FileStatus:
+  def __init__(self, path, text_status, props, locked, history, switched,
+               repo_locked, out_of_date):
+    self.path = path.strip()
+    self.text_status = text_status
+    self.props = props
+    self.locked = locked
+    self.history = history
+    self.switched = switched
+    self.repo_locked = repo_locked
+    self.out_of_date = out_of_date
+
+  def __str__(self):
+    return (self.text_status + self.props + self.locked + self.history +
+            self.switched + self.repo_locked + self.out_of_date +
+            self.path)
+
+
+def CaptureSVNStatus(options, path):
+  """Runs 'svn status' on an existing path.
+
+  Args:
+    path: The directory to run svn status.
+
+  Returns:
+    An array of FileStatus corresponding to the output of 'svn status'
+  """
+  info = CaptureSVN(options, ["status"], path)
+  result = []
+  if not info:
+    return result
+  for line in info.splitlines():
+    if line:
+      new_item = FileStatus(line[7:], line[0:1], line[1:2], line[2:3],
+                            line[3:4], line[4:5], line[5:6], line[6:7])
+      result.append(new_item)
+  return result
+
+
+### SCM abstraction layer
+
+
+class SCMWrapper(object):
+  """Add necessary glue between all the supported SCM.
+
+  This is the abstraction layer to bind to different SCM. Since currently only
+  subversion is supported, a lot of subersionism remains. This can be sorted out
+  once another SCM is supported."""
+  def __init__(self, url=None, root_dir=None, relpath=None,
+               scm_name='svn'):
+    # TODO(maruel): Deduce the SCM from the url.
+    self.scm_name = scm_name
+    self.url = url
+    self._root_dir = root_dir
+    if self._root_dir:
+      self._root_dir = self._root_dir.replace('/', os.sep).strip()
+    self.relpath = relpath
+    if self.relpath:
+      self.relpath = self.relpath.replace('/', os.sep).strip()
+
+  def FullUrlForRelativeUrl(self, url):
+    # Find the fourth '/' and strip from there. A bit hackish.
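+    # E.g. with self.url = "http://src.chromium.org/svn/trunk/src" (made up),
+    # FullUrlForRelativeUrl("/foo") -> "http://src.chromium.org/svn/foo".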
+    return '/'.join(self.url.split('/')[:4]) + url
+
+  def RunCommand(self, command, options, args, file_list=None):
+    # file_list will have all files that are modified appended to it.
+
+    if file_list is None:
+      file_list = []
+
+    commands = {
+          'cleanup':  self.cleanup,
+          'update':   self.update,
+          'revert':   self.revert,
+          'status':   self.status,
+          'diff':     self.diff,
+          'runhooks': self.status,
+        }
+
+    if not command in commands:
+      raise Error('Unknown command %s' % command)
+
+    return commands[command](options, args, file_list)
+
+  def cleanup(self, options, args, file_list):
+    """Cleanup working copy."""
+    command = ['cleanup']
+    command.extend(args)
+    RunSVN(options, command, os.path.join(self._root_dir, self.relpath))
+
+  def diff(self, options, args, file_list):
+    # NOTE: This function does not currently modify file_list.
+    command = ['diff']
+    command.extend(args)
+    RunSVN(options, command, os.path.join(self._root_dir, self.relpath))
+
+  def update(self, options, args, file_list):
+    """Runs SCM to update or transparently checkout the working copy.
+
+    All updated files will be appended to file_list.
+
+    Raises:
+      Error: if can't get URL for relative path.
+    """
+    # Only update if git is not controlling the directory.
+    git_path = os.path.join(self._root_dir, self.relpath, '.git')
+    if options.path_exists(git_path):
+      print >> options.stdout, (
+          "________ found .git directory; skipping %s" % self.relpath)
+      return
+
+    if args:
+      raise Error("Unsupported argument(s): %s" % ",".join(args))
+
+    url = self.url
+    components = url.split("@")
+    revision = None
+    forced_revision = False
+    if options.revision:
+      # Override the revision number.
+      url = '%s@%s' % (components[0], str(options.revision))
+      revision = int(options.revision)
+      forced_revision = True
+    elif len(components) == 2:
+      revision = int(components[1])
+      forced_revision = True
+
+    rev_str = ""
+    if revision:
+      rev_str = ' at %d' % revision
+
+    if not options.path_exists(os.path.join(self._root_dir, self.relpath)):
+      # We need to checkout.
+      command = ['checkout', url, os.path.join(self._root_dir, self.relpath)]
+      RunSVNAndGetFileList(options, command, self._root_dir, file_list)
+
+    # Get the existing scm url and the revision number of the current checkout.
+    from_info = CaptureSVNInfo(options,
+                               os.path.join(self._root_dir, self.relpath, '.'),
+                               '.')
+
+    if options.manually_grab_svn_rev:
+      # Retrieve the current HEAD version because svn is slow at null updates.
+      if not revision:
+        from_info_live = CaptureSVNInfo(options, from_info.url, '.')
+        revision = int(from_info_live.revision)
+        rev_str = ' at %d' % revision
+
+    if from_info.url != components[0]:
+      to_info = CaptureSVNInfo(options, url, '.')
+      if from_info.root != to_info.root:
+        # We have different roots, so check if we can switch --relocate.
+        # Subversion only permits this if the repository UUIDs match.
+        if from_info.uuid != to_info.uuid:
+          raise Error("Can't switch the checkout to %s; UUIDs don't match"
+                      % url)
+
+        # Perform the switch --relocate, then rewrite the from_url
+        # to reflect where we "are now."  (This is the same way that
+        # Subversion itself handles the metadata when switch --relocate
+        # is used.)  This makes the checks below for whether we
+        # can update to a revision or have to switch to a different
+        # branch work as expected.
+        # TODO(maruel):  TEST ME !
+        command = ["switch", "--relocate", from_info.root, to_info.root,
+                   self.relpath]
+        RunSVN(options, command, self._root_dir)
+        from_info.url = from_info.url.replace(from_info.root, to_info.root)
+
+    # If the provided url has a revision number that matches the revision
+    # number of the existing directory, then we don't need to bother updating.
+    if not options.force and from_info.revision == revision:
+      if options.verbose or not forced_revision:
+        print >>options.stdout, ("\n_____ %s%s" % (
+            self.relpath, rev_str))
+      return
+
+    command = ["update", os.path.join(self._root_dir, self.relpath)]
+    if revision:
+      command.extend(['--revision', str(revision)])
+    RunSVNAndGetFileList(options, command, self._root_dir, file_list)
+
+  def revert(self, options, args, file_list):
+    """Reverts local modifications. Subversion specific.
+
+    All reverted files will be appended to file_list, even if Subversion
+    doesn't know about them.
+    """
+    path = os.path.join(self._root_dir, self.relpath)
+    if not os.path.isdir(path):
+      # We can't revert a path that doesn't exist.
+      # TODO(maruel):  Should we update instead?
+      if options.verbose:
+        print >>options.stdout, ("\n_____ %s is missing, can't revert" %
+                                 self.relpath)
+      return
+
+    files = CaptureSVNStatus(options, path)
+    # Batch the command.
+    files_to_revert = []
+    for f in files:
+      file_path = os.path.join(path, f.path)
+      print >>options.stdout, file_path
+      # Unversioned file or unexpected unversioned file.
+      if f.text_status in ('?', '~'):
+        # Remove extraneous file. Also remove unexpected unversioned
+        # directories. svn won't touch them but we want to delete these.
+        file_list.append(file_path)
+        try:
+          os.remove(file_path)
+        except EnvironmentError:
+          RemoveDirectory(file_path)
+
+      if f.text_status != '?':
+        # For any other status, svn revert will work.
+        file_list.append(file_path)
+        files_to_revert.append(f.path)
+
+    # Revert them all at once.
+    if files_to_revert:
+      accumulated_paths = []
+      accumulated_length = 0
+      command = ['revert']
+      for p in files_to_revert:
+        # Some shells have issues with command lines that are too long.
+        if accumulated_length and accumulated_length + len(p) > 3072:
+          RunSVN(options, command + accumulated_paths,
+                 os.path.join(self._root_dir, self.relpath))
+          accumulated_paths = []
+          accumulated_length = 0
+        # Queue the current path even right after a flush so no file is
+        # skipped.
+        accumulated_paths.append(p)
+        accumulated_length += len(p)
+      if accumulated_paths:
+        RunSVN(options, command + accumulated_paths,
+               os.path.join(self._root_dir, self.relpath))
+
+  def status(self, options, args, file_list):
+    """Display status information."""
+    command = ['status']
+    command.extend(args)
+    RunSVNAndGetFileList(options, command,
+                         os.path.join(self._root_dir, self.relpath), file_list)
+
+
+## GClient implementation.
+
+
+class GClient(object):
+  """Object that represents a gclient checkout."""
+
+  supported_commands = [
+    'cleanup', 'diff', 'revert', 'status', 'update', 'runhooks'
+  ]
+
+  def __init__(self, root_dir, options):
+    self._root_dir = root_dir
+    self._options = options
+    self._config_content = None
+    self._config_dict = {}
+    self._deps_hooks = []
+
+  def SetConfig(self, content):
+    self._config_dict = {}
+    self._config_content = content
+    exec(content, self._config_dict)
+
+  def SaveConfig(self):
+    FileWrite(os.path.join(self._root_dir, self._options.config_filename),
+              self._config_content)
+
+  def _LoadConfig(self):
+    client_source = FileRead(os.path.join(self._root_dir,
+                                          self._options.config_filename))
+    self.SetConfig(client_source)
+
+  def ConfigContent(self):
+    return self._config_content
+
+  def GetVar(self, key, default=None):
+    return self._config_dict.get(key, default)
+
+  @staticmethod
+  def LoadCurrentConfig(options, from_dir=None):
+    """Searches for and loads a .gclient file relative to the current working
+    dir.
+
+    Returns:
+      A GClient object for the found .gclient file, or None if no .gclient
+      file was found.
+    """
+    if not from_dir:
+      from_dir = os.curdir
+    path = os.path.realpath(from_dir)
+    while not options.path_exists(os.path.join(path, options.config_filename)):
+      split_path = os.path.split(path)
+      if not split_path[1]:
+        return None
+      path = split_path[0]
+    client = options.gclient(path, options)
+    client._LoadConfig()
+    return client
+
+  def SetDefaultConfig(self, solution_name, solution_url, safesync_url):
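+    # DEFAULT_CLIENT_FILE_TEXT (defined earlier in this file) is a template
+    # taking the solution name, its URL and an optional safesync url, in
+    # that order.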
+    self.SetConfig(DEFAULT_CLIENT_FILE_TEXT % (
+      solution_name, solution_url, safesync_url
+    ))
+
+  def _SaveEntries(self, entries):
+    """Creates a .gclient_entries file to record the list of unique checkouts.
+
+    The .gclient_entries file lives in the same directory as .gclient.
+
+    Args:
+      entries: A sequence of solution names.
+    """
+    text = "entries = [\n"
+    for entry in entries:
+      text += "  \"%s\",\n" % entry
+    text += "]\n"
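+    # The resulting file is a Python fragment; for entries
+    # ['src', 'src/tools'] (hypothetical values) it would read:
+    #   entries = [
+    #     "src",
+    #     "src/tools",
+    #   ]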
+    FileWrite(os.path.join(self._root_dir, self._options.entries_filename),
+              text)
+
+  def _ReadEntries(self):
+    """Reads the .gclient_entries file for this client.
+
+    Returns:
+      A sequence of solution names, which will be empty if the entries
+      file hasn't been created yet.
+    """
+    scope = {}
+    filename = os.path.join(self._root_dir, self._options.entries_filename)
+    if not self._options.path_exists(filename):
+      return []
+    exec(FileRead(filename), scope)
+    return scope["entries"]
+
+  class FromImpl:
+    """Used to implement the From syntax."""
+
+    def __init__(self, module_name):
+      self.module_name = module_name
+
+    def __str__(self):
+      return 'From("%s")' % self.module_name
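+    # In a DEPS file a dependency may be declared as, e.g. (hypothetical):
+    #   "src/breakpad": From("breakpad"),
+    # meaning its URL is looked up in the DEPS of the 'breakpad' solution
+    # instead of being given inline (see the second pass in RunOnDeps).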
+
+  class _VarImpl:
+    def __init__(self, custom_vars, local_scope):
+      self._custom_vars = custom_vars
+      self._local_scope = local_scope
+
+    def Lookup(self, var_name):
+      """Implements the Var syntax."""
+      if var_name in self._custom_vars:
+        return self._custom_vars[var_name]
+      elif var_name in self._local_scope.get("vars", {}):
+        return self._local_scope["vars"][var_name]
+      raise Error("Var is not defined: %s" % var_name)
+
+  def _ParseSolutionDeps(self, solution_name, solution_deps_content,
+                         custom_vars):
+    """Parses the DEPS file for the specified solution.
+
+    Args:
+      solution_name: The name of the solution to query.
+      solution_deps_content: Content of the DEPS file for the solution.
+      custom_vars: A dict of vars to override any vars defined in the DEPS file.
+
+    Returns:
+      A dict mapping module names (as relative paths) to URLs or an empty
+      dict if the solution does not have a DEPS file.
+    """
+    # Skip empty DEPS file content.
+    if not solution_deps_content:
+      return {}
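+    # A DEPS file is a Python fragment along these lines (hypothetical
+    # values):
+    #   vars = {"webkit_rev": "@12345"}
+    #   deps = {
+    #     "src/webkit": "http://svn.example.com/webkit" + Var("webkit_rev"),
+    #   }
+    #   deps_os = {
+    #     "win": {"src/third_party/cygwin": "http://svn.example.com/cygwin"},
+    #   }
+    #   hooks = [{"pattern": ".", "action": ["python", "src/build/gyp"]}]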
+    # Eval the content
+    local_scope = {}
+    var = self._VarImpl(custom_vars, local_scope)
+    global_scope = {"From": self.FromImpl, "Var": var.Lookup, "deps_os": {}}
+    exec(solution_deps_content, global_scope, local_scope)
+    deps = local_scope.get("deps", {})
+
+    # Load OS-specific dependencies if defined.  These dependencies may
+    # override or extend the values defined by the 'deps' member.
+    if "deps_os" in local_scope:
+      deps_os_choices = {
+          "win32": "win",
+          "win": "win",
+          "cygwin": "win",
+          "darwin": "mac",
+          "mac": "mac",
+          "unix": "unix",
+          "linux": "unix",
+          "linux2": "unix",
+         }
+
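+      # E.g. on sys.platform 'linux2' the 'unix' deps are selected, unless
+      # --deps overrides the choice.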
+      if self._options.deps_os is not None:
+        deps_to_include = self._options.deps_os.split(",")
+        if "all" in deps_to_include:
+          deps_to_include = deps_os_choices.values()
+      else:
+        deps_to_include = [deps_os_choices.get(self._options.platform, "unix")]
+
+      deps_to_include = set(deps_to_include)
+      for deps_os_key in deps_to_include:
+        os_deps = local_scope["deps_os"].get(deps_os_key, {})
+        if len(deps_to_include) > 1:
+          # Ignore any overrides when including deps for more than one
+          # platform, so we collect the broadest set of dependencies available.
+          # We may end up with the wrong revision of something for our
+          # platform, but this is the best we can do.
+          deps.update([x for x in os_deps.items() if not x[0] in deps])
+        else:
+          deps.update(os_deps)
+
+    if 'hooks' in local_scope:
+      self._deps_hooks.extend(local_scope['hooks'])
+
+    # If use_relative_paths is set in the DEPS file, regenerate
+    # the dictionary using paths relative to the directory containing
+    # the DEPS file.
+    if local_scope.get('use_relative_paths'):
+      rel_deps = {}
+      for d, url in deps.items():
+        # normpath is required to allow DEPS to use .. in their
+        # dependency local path.
+        rel_deps[os.path.normpath(os.path.join(solution_name, d))] = url
+      return rel_deps
+    else:
+      return deps
+
+  def _ParseAllDeps(self, solution_urls, solution_deps_content):
+    """Parse the complete list of dependencies for the client.
+
+    Args:
+      solution_urls: A dict mapping module names (as relative paths) to URLs
+        corresponding to the solutions specified by the client.  This parameter
+        is passed as an optimization.
+      solution_deps_content: A dict mapping module names to the content
+        of their DEPS files.
+
+    Returns:
+      A dict mapping module names (as relative paths) to URLs corresponding
+      to the entire set of dependencies to checkout for the given client.
+
+    Raises:
+      Error: If a dependency conflicts with another dependency or with a
+        solution.
+    """
+    deps = {}
+    for solution in self.GetVar("solutions"):
+      custom_vars = solution.get("custom_vars", {})
+      solution_deps = self._ParseSolutionDeps(
+                              solution["name"],
+                              solution_deps_content[solution["name"]],
+                              custom_vars)
+
+      # If a line is in custom_deps, but not in the solution, we want to append
+      # this line to the solution.
+      if "custom_deps" in solution:
+        for d in solution["custom_deps"]:
+          if d not in solution_deps:
+            solution_deps[d] = solution["custom_deps"][d]
+
+      for d in solution_deps:
+        if "custom_deps" in solution and d in solution["custom_deps"]:
+          # Dependency is overridden.
+          url = solution["custom_deps"][d]
+          if url is None:
+            continue
+        else:
+          url = solution_deps[d]
+          # if we have a From reference dependent on another solution, then
+          # just skip the From reference. When we pull deps for the solution,
+          # we will take care of this dependency.
+          #
+          # If multiple solutions all have the same From reference, then we
+          # should only add one to our list of dependencies.
+          if type(url) != str:
+            if url.module_name in solution_urls:
+              # Already parsed.
+              continue
+            if d in deps and type(deps[d]) != str:
+              if url.module_name == deps[d].module_name:
+                continue
+          else:
+            parsed_url = urlparse.urlparse(url)
+            scheme = parsed_url[0]
+            if not scheme:
+              # A relative url. Fetch the real base.
+              path = parsed_url[2]
+              if path[0] != "/":
+                raise Error(
+                    "relative DEPS entry \"%s\" must begin with a slash" % d)
+              # Create a scm just to query the full url.
+              scm = self._options.scm_wrapper(solution["url"], self._root_dir,
+                                              None)
+              url = scm.FullUrlForRelativeUrl(url)
+        if d in deps and deps[d] != url:
+          raise Error(
+              "Solutions have conflicting versions of dependency \"%s\"" % d)
+        if d in solution_urls and solution_urls[d] != url:
+          raise Error(
+              "Dependency \"%s\" conflicts with specified solution" % d)
+        # Grab the dependency.
+        deps[d] = url
+    return deps
+
+  def _RunHookAction(self, hook_dict):
+    """Runs the action from a single hook.
+    """
+    command = hook_dict['action'][:]
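+    # A hook entry carries an 'action' command list and, for
+    # pattern-matched runs, a 'pattern' regex; e.g. (hypothetical):
+    #   {"pattern": ".", "action": ["python", "src/build/gyp_chromium"]}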
+    if command[0] == 'python':
+      # If the hook specified "python" as the first item, the action is a
+      # Python script.  Run it by starting a new copy of the same
+      # interpreter.
+      command[0] = sys.executable
+
+    # Use a discrete exit status code of 2 to indicate that a hook action
+    # failed.  Users of this script may wish to treat hook action failures
+    # differently from VC failures.
+    SubprocessCall(command, self._root_dir, self._options.stdout,
+                   fail_status=2)
+
+  def _RunHooks(self, command, file_list, is_using_git):
+    """Evaluates all hooks, running actions as needed.
+    """
+    # Hooks only run for these command types.
+    if command not in ('update', 'revert', 'runhooks'):
+      return
+
+    # Get any hooks from the .gclient file.
+    hooks = self.GetVar("hooks", [])
+    # Add any hooks found in DEPS files.
+    hooks.extend(self._deps_hooks)
+
+    # If "--force" was specified, run all hooks regardless of what files have
+    # changed.  If the user is using git, then we don't know what files have
+    # changed so we always run all hooks.
+    if self._options.force or is_using_git:
+      for hook_dict in hooks:
+        self._RunHookAction(hook_dict)
+      return
+
+    # Run hooks on the basis of whether the files from the gclient operation
+    # match each hook's pattern.
+    for hook_dict in hooks:
+      pattern = re.compile(hook_dict['pattern'])
+      for file in file_list:
+        if not pattern.search(file):
+          continue
+
+        self._RunHookAction(hook_dict)
+
+        # The hook's action only runs once.  Don't bother looking for any
+        # more matches.
+        break
+
+  def RunOnDeps(self, command, args):
+    """Runs a command on each dependency in a client and its dependencies.
+
+    The module's dependencies are specified in its top-level DEPS files.
+
+    Args:
+      command: The command to use (e.g., 'status' or 'diff')
+      args: list of str - extra arguments to add to the command line.
+
+    Raises:
+      Error: If the client has conflicting entries.
+    """
+    if command not in self.supported_commands:
+      raise Error("'%s' is an unsupported command" % command)
+
+    # Check for revision overrides.
+    revision_overrides = {}
+    for revision in self._options.revisions:
+      if revision.find("@") == -1:
+        raise Error(
+            "Specify the full dependency when specifying a revision number.")
+      revision_elem = revision.split("@")
+      # Disallow conflicting revs
+      if revision_elem[0] in revision_overrides and \
+         revision_overrides[revision_elem[0]] != revision_elem[1]:
+        raise Error(
+            "Conflicting revision numbers specified.")
+      revision_overrides[revision_elem[0]] = revision_elem[1]
+
+    solutions = self.GetVar("solutions")
+    if not solutions:
+      raise Error("No solution specified")
+
+    # When running runhooks --force, there's no need to consult the SCM.
+    # All known hooks are expected to run unconditionally regardless of working
+    # copy state, so skip the SCM status check.
+    run_scm = not (command == 'runhooks' and self._options.force)
+
+    entries = {}
+    entries_deps_content = {}
+    file_list = []
+    # Run on the base solutions first.
+    for solution in solutions:
+      name = solution["name"]
+      if name in entries:
+        raise Error("solution %s specified more than once" % name)
+      url = solution["url"]
+      entries[name] = url
+      if run_scm:
+        self._options.revision = revision_overrides.get(name)
+        scm = self._options.scm_wrapper(url, self._root_dir, name)
+        scm.RunCommand(command, self._options, args, file_list)
+        self._options.revision = None
+      try:
+        deps_content = FileRead(os.path.join(self._root_dir, name,
+                                             self._options.deps_file))
+      except IOError, e:
+        if e.errno != errno.ENOENT:
+          raise
+        deps_content = ""
+      entries_deps_content[name] = deps_content
+
+    # Process the dependencies next (sort alphanumerically to ensure that
+    # containing directories get populated first and for readability)
+    deps = self._ParseAllDeps(entries, entries_deps_content)
+    deps_to_process = deps.keys()
+    deps_to_process.sort()
+
+    # First pass for direct dependencies.
+    for d in deps_to_process:
+      if type(deps[d]) == str:
+        url = deps[d]
+        entries[d] = url
+        if run_scm:
+          self._options.revision = revision_overrides.get(d)
+          scm = self._options.scm_wrapper(url, self._root_dir, d)
+          scm.RunCommand(command, self._options, args, file_list)
+          self._options.revision = None
+
+    # Second pass for inherited deps (via the From keyword)
+    for d in deps_to_process:
+      if type(deps[d]) != str:
+        sub_deps = self._ParseSolutionDeps(
+                           deps[d].module_name,
+                           FileRead(os.path.join(self._root_dir,
+                                                 deps[d].module_name,
+                                                 self._options.deps_file)),
+                           {})
+        url = sub_deps[d]
+        entries[d] = url
+        if run_scm:
+          self._options.revision = revision_overrides.get(d)
+          scm = self._options.scm_wrapper(url, self._root_dir, d)
+          scm.RunCommand(command, self._options, args, file_list)
+          self._options.revision = None
+
+    is_using_git = IsUsingGit(self._root_dir, entries.keys())
+    self._RunHooks(command, file_list, is_using_git)
+
+    if command == 'update':
+      # notify the user if there is an orphaned entry in their working copy.
+      # TODO(darin): we should delete this directory manually if it doesn't
+      # have any changes in it.
+      prev_entries = self._ReadEntries()
+      for entry in prev_entries:
+        e_dir = os.path.join(self._root_dir, entry)
+        if entry not in entries and self._options.path_exists(e_dir):
+          if CaptureSVNStatus(self._options, e_dir):
+            # There are modified files in this entry
+            entries[entry] = None  # Keep warning until removed.
+            print >> self._options.stdout, (
+                "\nWARNING: \"%s\" is no longer part of this client.  "
+                "It is recommended that you manually remove it.\n") % entry
+          else:
+            # Delete the entry
+            print >> self._options.stdout, ("\n________ deleting '%s' "
+                "in '%s'") % (entry, self._root_dir)
+            RemoveDirectory(e_dir)
+      # record the current list of entries for next time
+      self._SaveEntries(entries)
+
+  def PrintRevInfo(self):
+    """Output revision info mapping for the client and its dependencies. This
+    allows the capture of a overall "revision" for the source tree that can
+    be used to reproduce the same tree in the future. The actual output
+    contains enough information (source paths, svn server urls and revisions)
+    that it can be used either to generate external svn commands (without
+    gclient) or as input to gclient's --rev option (with some massaging of
+    the data).
+
+    NOTE: Unlike RunOnDeps this does not require a local checkout and is run
+    on the Pulse master. It MUST NOT execute hooks.
+
+    Raises:
+      Error: If the client has conflicting entries.
+    """
+    # Check for revision overrides.
+    revision_overrides = {}
+    for revision in self._options.revisions:
+      if revision.find("@") < 0:
+        raise Error(
+            "Specify the full dependency when specifying a revision number.")
+      revision_elem = revision.split("@")
+      # Disallow conflicting revs
+      if revision_elem[0] in revision_overrides and \
+         revision_overrides[revision_elem[0]] != revision_elem[1]:
+        raise Error(
+            "Conflicting revision numbers specified.")
+      revision_overrides[revision_elem[0]] = revision_elem[1]
+
+    solutions = self.GetVar("solutions")
+    if not solutions:
+      raise Error("No solution specified")
+
+    entries = {}
+    entries_deps_content = {}
+
+    # Inner helper to generate base url and rev tuple (including honoring
+    # |revision_overrides|)
+    def GetURLAndRev(name, original_url):
+      if original_url.find("@") < 0:
+        if name in revision_overrides:
+          return (original_url, int(revision_overrides[name]))
+        else:
+          # TODO(aharper): SVN/SCMWrapper cleanup (non-local commandset)
+          return (original_url, CaptureSVNHeadRevision(self._options,
+                                                       original_url))
+      else:
+        url_components = original_url.split("@")
+        if name in revision_overrides:
+          return (url_components[0], int(revision_overrides[name]))
+        else:
+          return (url_components[0], int(url_components[1]))
+
+    # Run on the base solutions first.
+    for solution in solutions:
+      name = solution["name"]
+      if name in entries:
+        raise Error("solution %s specified more than once" % name)
+      (url, rev) = GetURLAndRev(name, solution["url"])
+      entries[name] = "%s@%d" % (url, rev)
+      # TODO(aharper): SVN/SCMWrapper cleanup (non-local commandset)
+      entries_deps_content[name] = CaptureSVN(
+                                     self._options,
+                                     ["cat",
+                                      "%s/%s@%d" % (url,
+                                                    self._options.deps_file,
+                                                    rev)],
+                                     os.getcwd())
+
+    # Process the dependencies next (sort alphanumerically to ensure that
+    # containing directories get populated first and for readability)
+    deps = self._ParseAllDeps(entries, entries_deps_content)
+    deps_to_process = deps.keys()
+    deps_to_process.sort()
+
+    # First pass for direct dependencies.
+    for d in deps_to_process:
+      if type(deps[d]) == str:
+        (url, rev) = GetURLAndRev(d, deps[d])
+        entries[d] = "%s@%d" % (url, rev)
+
+    # Second pass for inherited deps (via the From keyword)
+    for d in deps_to_process:
+      if type(deps[d]) != str:
+        deps_parent_url = entries[deps[d].module_name]
+        if deps_parent_url.find("@") < 0:
+          raise Error("From %s missing revisioned url" % deps[d].module_name)
+        deps_parent_url_components = deps_parent_url.split("@")
+        # TODO(aharper): SVN/SCMWrapper cleanup (non-local commandset)
+        deps_parent_content = CaptureSVN(
+                                self._options,
+                                ["cat",
+                                 "%s/%s@%s" % (deps_parent_url_components[0],
+                                               self._options.deps_file,
+                                               deps_parent_url_components[1])],
+                                os.getcwd())
+        sub_deps = self._ParseSolutionDeps(
+                           deps[d].module_name,
+                           FileRead(os.path.join(self._root_dir,
+                                                 deps[d].module_name,
+                                                 self._options.deps_file)),
+                           {})
+        (url, rev) = GetURLAndRev(d, sub_deps[d])
+        entries[d] = "%s@%d" % (url, rev)
+
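+    # Emits one 'path,url@rev' pair per entry, ';'-separated; e.g.
+    # (hypothetical):
+    #   src,svn://server/trunk/src@1234;src/webkit,svn://server/webkit@99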
+    print ";".join(["%s,%s" % (x, entries[x]) for x in sorted(entries.keys())])
+
+
+## gclient commands.
+
+
+def DoCleanup(options, args):
+  """Handle the cleanup subcommand.
+
+  Raises:
+    Error: if client isn't configured properly.
+  """
+  client = options.gclient.LoadCurrentConfig(options)
+  if not client:
+    raise Error("client not configured; see 'gclient config'")
+  if options.verbose:
+    # Print out the .gclient file.  This is longer than if we just printed the
+    # client dict, but more legible, and it might contain helpful comments.
+    print >>options.stdout, client.ConfigContent()
+  options.verbose = True
+  return client.RunOnDeps('cleanup', args)
+
+
+def DoConfig(options, args):
+  """Handle the config subcommand.
+
+  Args:
+    options: If options.spec set, a string providing contents of config file.
+    args: The command line args.  If spec is not set,
+          then args[0] is a string URL to get for config file.
+
+  Raises:
+    Error: on usage error
+  """
+  if len(args) < 1 and not options.spec:
+    raise Error("required argument missing; see 'gclient help config'")
+  if options.path_exists(options.config_filename):
+    raise Error("%s file already exists in the current directory" %
+                options.config_filename)
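+  # E.g. (hypothetical) 'gclient config svn://server/trunk/src' writes a
+  # .gclient file with a single solution named 'src' at that URL.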
+  client = options.gclient('.', options)
+  if options.spec:
+    client.SetConfig(options.spec)
+  else:
+    # TODO(darin): it would be nice to be able to specify an alternate relpath
+    # for the given URL.
+    base_url = args[0]
+    name = args[0].split("/")[-1]
+    safesync_url = ""
+    if len(args) > 1:
+      safesync_url = args[1]
+    client.SetDefaultConfig(name, base_url, safesync_url)
+  client.SaveConfig()
+
+
+def DoHelp(options, args):
+  """Handle the help subcommand giving help for another subcommand.
+
+  Raises:
+    Error: if the command is unknown.
+  """
+  if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:
+    print >>options.stdout, COMMAND_USAGE_TEXT[args[0]]
+  else:
+    raise Error("unknown subcommand '%s'; see 'gclient help'" % args[0])
+
+
+def DoStatus(options, args):
+  """Handle the status subcommand.
+
+  Raises:
+    Error: if client isn't configured properly.
+  """
+  client = options.gclient.LoadCurrentConfig(options)
+  if not client:
+    raise Error("client not configured; see 'gclient config'")
+  if options.verbose:
+    # Print out the .gclient file.  This is longer than if we just printed the
+    # client dict, but more legible, and it might contain helpful comments.
+    print >>options.stdout, client.ConfigContent()
+  options.verbose = True
+  return client.RunOnDeps('status', args)
+
+
+def DoUpdate(options, args):
+  """Handle the update and sync subcommands.
+
+  Raises:
+    Error: if client isn't configured properly.
+  """
+  client = options.gclient.LoadCurrentConfig(options)
+
+  if not client:
+    raise Error("client not configured; see 'gclient config'")
+
+  if not options.head:
+    solutions = client.GetVar('solutions')
+    if solutions:
+      for s in solutions:
+        if s.get('safesync_url', ''):
+          # Rip through the revisions and make sure we're not overriding
+          # something that was explicitly passed.
+          has_key = False
+          for r in options.revisions:
+            if r.split('@')[0] == s['name']:
+              has_key = True
+              break
+
+          if not has_key:
+            handle = urllib.urlopen(s['safesync_url'])
+            rev = handle.read().strip()
+            handle.close()
+            if rev:
+              options.revisions.append(s['name']+'@'+rev)
+
+  if options.verbose:
+    # Print out the .gclient file.  This is longer than if we just printed the
+    # client dict, but more legible, and it might contain helpful comments.
+    print >>options.stdout, client.ConfigContent()
+  return client.RunOnDeps('update', args)
+
+
+def DoDiff(options, args):
+  """Handle the diff subcommand.
+
+  Raises:
+    Error: if client isn't configured properly.
+  """
+  client = options.gclient.LoadCurrentConfig(options)
+  if not client:
+    raise Error("client not configured; see 'gclient config'")
+  if options.verbose:
+    # Print out the .gclient file.  This is longer than if we just printed the
+    # client dict, but more legible, and it might contain helpful comments.
+    print >>options.stdout, client.ConfigContent()
+  options.verbose = True
+  return client.RunOnDeps('diff', args)
+
+
+def DoRevert(options, args):
+  """Handle the revert subcommand.
+
+  Raises:
+    Error: if client isn't configured properly.
+  """
+  client = options.gclient.LoadCurrentConfig(options)
+  if not client:
+    raise Error("client not configured; see 'gclient config'")
+  return client.RunOnDeps('revert', args)
+
+
+def DoRunHooks(options, args):
+  """Handle the runhooks subcommand.
+
+  Raises:
+    Error: if client isn't configured properly.
+  """
+  client = options.gclient.LoadCurrentConfig(options)
+  if not client:
+    raise Error("client not configured; see 'gclient config'")
+  if options.verbose:
+    # Print out the .gclient file.  This is longer than if we just printed the
+    # client dict, but more legible, and it might contain helpful comments.
+    print >>options.stdout, client.ConfigContent()
+  return client.RunOnDeps('runhooks', args)
+
+
+def DoRevInfo(options, args):
+  """Handle the revinfo subcommand.
+
+  Raises:
+    Error: if client isn't configured properly.
+  """
+  client = options.gclient.LoadCurrentConfig(options)
+  if not client:
+    raise Error("client not configured; see 'gclient config'")
+  client.PrintRevInfo()
+
+
+gclient_command_map = {
+  "cleanup": DoCleanup,
+  "config": DoConfig,
+  "diff": DoDiff,
+  "help": DoHelp,
+  "status": DoStatus,
+  "sync": DoUpdate,
+  "update": DoUpdate,
+  "revert": DoRevert,
+  "runhooks": DoRunHooks,
+  "revinfo" : DoRevInfo,
+}
+
+
+def DispatchCommand(command, options, args, command_map=None):
+  """Dispatches the appropriate subcommand based on command line arguments."""
+  if command_map is None:
+    command_map = gclient_command_map
+
+  if command in command_map:
+    return command_map[command](options, args)
+  else:
+    raise Error("unknown subcommand '%s'; see 'gclient help'" % command)
+
+
+def Main(argv):
+  """Parse command line arguments and dispatch command."""
+
+  option_parser = optparse.OptionParser(usage=DEFAULT_USAGE_TEXT,
+                                        version=__version__)
+  option_parser.disable_interspersed_args()
+  option_parser.add_option("", "--force", action="store_true", default=False,
+                           help=("(update/sync only) force update even "
+                                 "for modules which haven't changed"))
+  option_parser.add_option("", "--revision", action="append", dest="revisions",
+                           metavar="REV", default=[],
+                           help=("(update/sync only) sync to a specific "
+                                 "revision, can be used multiple times for "
+                                 "each solution, e.g. --revision=src@123, "
+                                 "--revision=internal@32"))
+  option_parser.add_option("", "--deps", default=None, dest="deps_os",
+                           metavar="OS_LIST",
+                           help=("(update/sync only) sync deps for the "
+                                 "specified (comma-separated) platform(s); "
+                                 "'all' will sync all platforms"))
+  option_parser.add_option("", "--spec", default=None,
+                           help=("(config only) create a gclient file "
+                                 "containing the provided string"))
+  option_parser.add_option("", "--verbose", action="store_true", default=False,
+                           help="produce additional output for diagnostics")
+  option_parser.add_option("", "--manually_grab_svn_rev", action="store_true",
+                           default=False,
+                           help="Skip svn up whenever possible by requesting "
+                                "actual HEAD revision from the repository")
+  option_parser.add_option("", "--head", action="store_true", default=False,
+                           help=("skips any safesync_urls specified in "
+                                 "configured solutions"))
+
+  if len(argv) < 2:
+    # Users don't need to be told to use the 'help' command.
+    option_parser.print_help()
+    return 1
+  # Add manual support for --version as first argument.
+  if argv[1] == '--version':
+    option_parser.print_version()
+    return 0
+
+  # Add manual support for --help as first argument.
+  if argv[1] == '--help':
+    argv[1] = 'help'
+
+  command = argv[1]
+  options, args = option_parser.parse_args(argv[2:])
+
+  if len(argv) < 3 and command == "help":
+    option_parser.print_help()
+    return 0
+
+  # Files used for configuration and state saving.
+  options.config_filename = os.environ.get("GCLIENT_FILE", ".gclient")
+  options.entries_filename = ".gclient_entries"
+  options.deps_file = "DEPS"
+
+  # These are overridden when testing. They are not externally visible.
+  options.stdout = sys.stdout
+  options.path_exists = os.path.exists
+  options.gclient = GClient
+  options.scm_wrapper = SCMWrapper
+  options.platform = sys.platform
+  return DispatchCommand(command, options, args)
+
+
+if "__main__" == __name__:
+  try:
+    result = Main(sys.argv)
+  except Error, e:
+    print "Error: %s" % str(e)
+    result = 1
+  sys.exit(result)
+
+# vim: ts=2:sw=2:tw=80:et:

+ 682 - 0
git-cl.py

@@ -0,0 +1,682 @@
+#!/usr/bin/python
+# git-cl -- a git-command for integrating reviews on Rietveld
+# Copyright (C) 2008 Evan Martin <martine@danga.com>
+
+import getpass
+import optparse
+import os
+import re
+import readline
+import subprocess
+import sys
+import tempfile
+import textwrap
+import upload
+import urllib2
+
+DEFAULT_SERVER = 'codereview.appspot.com'
+
+def DieWithError(message):
+  print >>sys.stderr, message
+  sys.exit(1)
+
+
+def RunGit(args, error_ok=False, error_message=None, exit_code=False):
+  cmd = ['git'] + args
+  # Useful for debugging:
+  # print >>sys.stderr, ' '.join(cmd)
+  proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+  output = proc.communicate()[0]
+  if exit_code:
+    return proc.returncode
+  if not error_ok and proc.returncode != 0:
+    DieWithError('Command "%s" failed.\n' % (' '.join(cmd)) +
+                 (error_message or output))
+  return output
+
+
+class Settings:
+  def __init__(self):
+    self.server = None
+    self.cc = None
+    self.is_git_svn = None
+    self.svn_branch = None
+    self.tree_status_url = None
+    self.viewvc_url = None
+
+  def GetServer(self, error_ok=False):
+    if not self.server:
+      if not error_ok:
+        error_message = ('You must configure your review setup by running '
+                         '"git cl config".')
+        self.server = self._GetConfig('rietveld.server',
+                                      error_message=error_message)
+      else:
+        self.server = self._GetConfig('rietveld.server', error_ok=True)
+    return self.server
+
+  def GetCCList(self):
+    if self.cc is None:
+      self.cc = self._GetConfig('rietveld.cc', error_ok=True)
+    return self.cc
+
+  def GetIsGitSvn(self):
+    """Return true if this repo looks like it's using git-svn."""
+    if self.is_git_svn is None:
+      # If you have any "svn-remote.*" config keys, we think you're using svn.
+      self.is_git_svn = RunGit(['config', '--get-regexp', r'^svn-remote\.'],
+                               exit_code=True) == 0
+    return self.is_git_svn
+
+  def GetSVNBranch(self):
+    if self.svn_branch is None:
+      if not self.GetIsGitSvn():
+        raise Exception("Repo doesn't appear to be a git-svn repo.")
+
+      # Try to figure out which remote branch we're based on.
+      # Strategy:
+      # 1) find all git-svn branches and note their svn URLs.
+      # 2) iterate through our branch history and match up the URLs.
+
+      # regexp matching the git-svn line that contains the URL.
+      git_svn_re = re.compile(r'^\s*git-svn-id: (\S+)@', re.MULTILINE)
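+      # Matches commit message lines like (hypothetical):
+      #   git-svn-id: svn://server/chrome/trunk/src@1234 some-repo-uuid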
+
+      # Get the refname and svn url for all refs/remotes/*.
+      remotes = RunGit(['for-each-ref', '--format=%(refname)',
+                        'refs/remotes']).splitlines()
+      svn_refs = {}
+      for ref in remotes:
+        # git-svn remote refs generally live directly in the refs/remotes/
+        # directory, not in a subdirectory (like refs/remotes/origin/master).
+        if '/' in ref[len('refs/remotes/'):]:
+          continue
+        match = git_svn_re.search(RunGit(['cat-file', '-p', ref]))
+        if match:
+          svn_refs[match.group(1)] = ref
+
+      if len(svn_refs) == 1:
+        # Only one svn branch exists -- seems like a good candidate.
+        self.svn_branch = svn_refs.values()[0]
+      elif len(svn_refs) > 1:
+        # We have more than one remote branch available.  We don't
+        # want to go through all of history, so read a line from the
+        # pipe at a time.
+        # The -100 is an arbitrary limit so we don't search forever.
+        cmd = ['git', 'log', '-100']
+        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+        for line in proc.stdout:
+          match = git_svn_re.match(line)
+          if match:
+            url = match.group(1)
+            if url in svn_refs:
+              self.svn_branch = svn_refs[url]
+              proc.stdout.close()  # Cut pipe.
+              break
+
+      if not self.svn_branch:
+        raise Exception("Can't guess svn branch -- try specifying it on "
+                        "the command line")
+
+    return self.svn_branch
+
+  def GetTreeStatusUrl(self, error_ok=False):
+    if not self.tree_status_url:
+      error_message = ('You must configure your tree status URL by running '
+                       '"git cl config".')
+      self.tree_status_url = self._GetConfig('rietveld.tree-status-url',
+                                             error_ok=error_ok,
+                                             error_message=error_message)
+    return self.tree_status_url
+
+  def GetViewVCUrl(self):
+    if not self.viewvc_url:
+      self.viewvc_url = self._GetConfig('rietveld.viewvc-url', error_ok=True)
+    return self.viewvc_url
+
+  def _GetConfig(self, param, **kwargs):
+    return RunGit(['config', param], **kwargs).strip()
+
+
+settings = Settings()
+
+
+did_migrate_check = False
+def CheckForMigration():
+  """Migrate from the old issue format, if found.
+
+  We used to store the branch<->issue mapping in a file in .git, but it's
+  better to store it in the .git/config, since deleting a branch deletes that
+  branch's entry there.
+  """
+
+  # Don't run more than once.
+  global did_migrate_check
+  if did_migrate_check:
+    return
+
+  gitdir = RunGit(['rev-parse', '--git-dir']).strip()
+  storepath = os.path.join(gitdir, 'cl-mapping')
+  if os.path.exists(storepath):
+    print "old-style git-cl mapping file (%s) found; migrating." % storepath
+    store = open(storepath, 'r')
+    for line in store:
+      branch, issue = line.strip().split()
+      RunGit(['config', 'branch.%s.rietveldissue' % ShortBranchName(branch),
+              issue])
+    store.close()
+    os.remove(storepath)
+  did_migrate_check = True
+
+
+def IssueURL(issue):
+  """Get the URL for a particular issue."""
+  return 'http://%s/%s' % (settings.GetServer(), issue)
+
+
+def ShortBranchName(branch):
+  """Convert a name like 'refs/heads/foo' to just 'foo'."""
+  return branch.replace('refs/heads/', '')
+
+
+class Changelist:
+  def __init__(self, branchref=None):
+    # Poke settings so we get the "configure your server" message if necessary.
+    settings.GetServer()
+    self.branchref = branchref
+    if self.branchref:
+      self.branch = ShortBranchName(self.branchref)
+    else:
+      self.branch = None
+    self.upstream_branch = None
+    self.has_issue = False
+    self.issue = None
+    self.has_description = False
+    self.description = None
+    # Patchset state mirrors the issue state above; GetPatchset() expects
+    # these attributes to exist.
+    self.has_patchset = False
+    self.patchset = None
+
+  def GetBranch(self):
+    """Returns the short branch name, e.g. 'master'."""
+    if not self.branch:
+      self.branchref = RunGit(['symbolic-ref', 'HEAD']).strip()
+      self.branch = ShortBranchName(self.branchref)
+    return self.branch
+
+  def GetBranchRef(self):
+    """Returns the full branch name, e.g. 'refs/heads/master'."""
+    self.GetBranch()  # Poke the lazy loader.
+    return self.branchref
+
+  def GetUpstreamBranch(self):
+    if self.upstream_branch is None:
+      branch = self.GetBranch()
+      upstream_branch = RunGit(['config', 'branch.%s.merge' % branch],
+                               error_ok=True).strip()
+      if upstream_branch:
+        remote = RunGit(['config', 'branch.%s.remote' % branch]).strip()
+        # We have remote=origin and branch=refs/heads/foobar; convert to
+        # refs/remotes/origin/foobar.
+        self.upstream_branch = upstream_branch.replace('heads',
+                                                       'remotes/' + remote)
+
+      if not self.upstream_branch:
+        # Fall back on trying a git-svn upstream branch.
+        if settings.GetIsGitSvn():
+          self.upstream_branch = settings.GetSVNBranch()
+
+      if not self.upstream_branch:
+        DieWithError("""Unable to determine default branch to diff against.
+Either pass complete "git diff"-style arguments, like
+  git cl upload origin/master
+or verify this branch is set up to track another (via the --track argument to
+"git checkout -b ...").""")
+
+    return self.upstream_branch
+
+  def GetIssue(self):
+    if not self.has_issue:
+      CheckForMigration()
+      issue = RunGit(['config', self._IssueSetting()], error_ok=True).strip()
+      if issue:
+        self.issue = issue
+      else:
+        self.issue = None
+      self.has_issue = True
+    return self.issue
+
+  def GetIssueURL(self):
+    return IssueURL(self.GetIssue())
+
+  def GetDescription(self, pretty=False):
+    if not self.has_description:
+      if self.GetIssue():
+        url = self.GetIssueURL() + '/description'
+        self.description = urllib2.urlopen(url).read().strip()
+      self.has_description = True
+    if pretty:
+      wrapper = textwrap.TextWrapper()
+      wrapper.initial_indent = wrapper.subsequent_indent = '  '
+      return wrapper.fill(self.description)
+    return self.description
+
+  def GetPatchset(self):
+    if not self.has_patchset:
+      patchset = RunGit(['config', self._PatchsetSetting()],
+                        error_ok=True).strip()
+      if patchset:
+        self.patchset = patchset
+      else:
+        self.patchset = None
+      self.has_patchset = True
+    return self.patchset
+
+  def SetPatchset(self, patchset):
+    """Set this branch's patchset.  If patchset=0, clears the patchset."""
+    if patchset:
+      RunGit(['config', self._PatchsetSetting(), str(patchset)])
+    else:
+      RunGit(['config', '--unset', self._PatchsetSetting()])
+    self.has_patchset = False
+
+  def SetIssue(self, issue):
+    """Set this branch's issue.  If issue=0, clears the issue."""
+    if issue:
+      RunGit(['config', self._IssueSetting(), str(issue)])
+    else:
+      RunGit(['config', '--unset', self._IssueSetting()])
+      self.SetPatchset(0)
+    self.has_issue = False
+
+  def CloseIssue(self):
+    def GetUserCredentials():
+      email = raw_input('Email: ').strip()
+      password = getpass.getpass('Password for %s: ' % email)
+      return email, password
+
+    rpc_server = upload.HttpRpcServer(settings.GetServer(),
+                                      GetUserCredentials,
+                                      host_override=settings.GetServer(),
+                                      save_cookies=True)
+    # You cannot close an issue with a GET.
+    # Pass the description as form data so the request is a POST rather
+    # than a GET.
+    data = [("description", self.description),]
+    ctype, body = upload.EncodeMultipartFormData(data, [])
+    rpc_server.Send('/' + self.GetIssue() + '/close', body, ctype)
+
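+  # Issue and patchset numbers are stored per branch in the git config,
+  # e.g. (hypothetical): branch.master.rietveldissue = 42.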
+  def _IssueSetting(self):
+    """Return the git setting that stores this change's issue."""
+    return 'branch.%s.rietveldissue' % self.GetBranch()
+
+  def _PatchsetSetting(self):
+    """Return the git setting that stores this change's most recent patchset."""
+    return 'branch.%s.rietveldpatchset' % self.GetBranch()
+
+
+def CmdConfig(args):
+  server = settings.GetServer(error_ok=True)
+  prompt = 'Rietveld server (host[:port])'
+  prompt += ' [%s]' % (server or DEFAULT_SERVER)
+  newserver = raw_input(prompt + ': ')
+  if not server and not newserver:
+    newserver = DEFAULT_SERVER
+  if newserver and newserver != server:
+    RunGit(['config', 'rietveld.server', newserver])
+
+  def SetProperty(initial, caption, name):
+    prompt = caption
+    if initial:
+      prompt += ' ("x" to clear) [%s]' % initial
+    new_val = raw_input(prompt + ': ')
+    if new_val == 'x':
+      RunGit(['config', '--unset-all', 'rietveld.' + name], error_ok=True)
+    elif new_val and new_val != initial:
+      RunGit(['config', 'rietveld.' + name, new_val])
+
+  SetProperty(settings.GetCCList(), 'CC list', 'cc')
+  SetProperty(settings.GetTreeStatusUrl(error_ok=True), 'Tree status URL',
+              'tree-status-url')
+  SetProperty(settings.GetViewVCUrl(), 'ViewVC URL', 'viewvc-url')
+
+  # TODO: configure a default branch to diff against, rather than this
+  # svn-based hackery.
+
+
+def CmdStatus(args):
+  branches = RunGit(['for-each-ref', '--format=%(refname)', 'refs/heads'])
+  if branches:
+    print 'Branches associated with reviews:'
+    for branch in sorted(branches.splitlines()):
+      cl = Changelist(branchref=branch)
+      print "  %10s: %s" % (cl.GetBranch(), cl.GetIssue())
+
+  cl = Changelist()
+  print
+  print 'Current branch:',
+  if not cl.GetIssue():
+    print 'no issue assigned.'
+    return 0
+  print cl.GetBranch()
+  print 'Issue number:', cl.GetIssue(), '(%s)' % cl.GetIssueURL()
+  print 'Issue description:'
+  print cl.GetDescription(pretty=True)
+
+
+def CmdIssue(args):
+  cl = Changelist()
+  if args:
+    cl.SetIssue(int(args[0]))
+  print 'Issue number:', cl.GetIssue(), '(%s)' % cl.GetIssueURL()
+
+
+def UserEditedLog(starting_text):
+  """Given some starting text, let the user edit it and return the result."""
+  editor = os.getenv('EDITOR', 'vi')
+
+  log_file = tempfile.NamedTemporaryFile()
+  filename = log_file.name
+  log_file.write(starting_text)
+  log_file.flush()
+
+  ret = subprocess.call(editor + ' ' + filename, shell=True)
+  if ret != 0:
+    return
+
+  text = open(filename).read()
+  log_file.close()
+  stripcomment_re = re.compile(r'^#.*$', re.MULTILINE)
+  return stripcomment_re.sub('', text).strip()
+
+
+def CmdUpload(args):
+  parser = optparse.OptionParser(
+      usage='git cl upload [options] [args to "git diff"]')
+  parser.add_option('-m', dest='message', help='message for patch')
+  parser.add_option('-r', '--reviewers',
+                    help='reviewer email addresses')
+  parser.add_option('--send-mail', action='store_true',
+                    help='send email to reviewer immediately')
+  (options, args) = parser.parse_args(args)
+
+  cl = Changelist()
+  if not args:
+    # Default to diffing against the "upstream" branch.
+    args = [cl.GetUpstreamBranch()]
+  # --no-ext-diff is broken in some versions of Git, so try to work around
+  # this by overriding the environment (but there is still a problem if the
+  # git config key "diff.external" is used).
+  env = os.environ.copy()
+  if 'GIT_EXTERNAL_DIFF' in env:
+    del env['GIT_EXTERNAL_DIFF']
+  subprocess.call(['git', 'diff', '--no-ext-diff', '--stat'] + args, env=env)
+
+  upload_args = ['--assume_yes']  # Don't ask about untracked files.
+  upload_args.extend(['--server', settings.GetServer()])
+  if options.reviewers:
+    upload_args.extend(['--reviewers', options.reviewers])
+  upload_args.extend(['--cc', settings.GetCCList()])
+  if options.message:
+    upload_args.extend(['--message', options.message])
+  if options.send_mail:
+    if not options.reviewers:
+      DieWithError("Must specify reviewers to send email.")
+    upload_args.append('--send_mail')
+  if cl.GetIssue():
+    upload_args.extend(['--issue', cl.GetIssue()])
+    print ("This branch is associated with issue %s. "
+           "Adding patch to that issue." % cl.GetIssue())
+  else:
+    # Construct a description for this change from the log.
+    # We need to convert diff options to log options.
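+    # E.g. diff args ['origin/master'] become log args ['origin/master..'],
+    # i.e. everything on this branch that isn't upstream.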
+    log_args = []
+    if len(args) == 1 and not args[0].endswith('.'):
+      log_args = [args[0] + '..']
+    elif len(args) == 2:
+      log_args = [args[0] + '..' + args[1]]
+    else:
+      log_args = args[:]  # Hope for the best!
+    desc = RunGit(['log', '--pretty=format:%s\n\n%b'] + log_args)
+    initial_text = """# Enter a description of the change.
+# This will be displayed on the codereview site.
+# The first line will also be used as the subject of the review."""
+    desc = UserEditedLog(initial_text + '\n' + desc)
+    if not desc:
+      print "Description empty; aborting."
+      return 1
+    subject = desc.splitlines()[0]
+    upload_args.extend(['--message', subject])
+    upload_args.extend(['--description', desc])
+  issue, patchset = upload.RealMain(['upload'] + upload_args + args)
+  if not cl.GetIssue():
+    cl.SetIssue(issue)
+  cl.SetPatchset(patchset)
+
+
+def CmdDCommit(args):
+  parser = optparse.OptionParser(
+      usage='git cl dcommit [options] [git-svn branch to apply against]')
+  parser.add_option('-f', action='store_true', dest='force',
+                    help="force yes to questions (don't prompt)")
+  parser.add_option('-c', dest='contributor',
+                    help="external contributor for patch (appended to " +
+                         "description)")
+  (options, args) = parser.parse_args(args)
+
+  cl = Changelist()
+
+  if not args:
+    # Default to merging against our best guess of the upstream branch.
+    args = [cl.GetUpstreamBranch()]
+
+  base_branch = args[0]
+
+  # It is important to have these checks at the top.  Not only for user
+  # convenience, but also because the cl object then caches the correct values
+  # of these fields even as we're juggling branches for setting up the commit.
+  if not cl.GetIssue():
+    print 'Current issue unknown -- has this branch been uploaded?'
+    return 1
+  if not cl.GetDescription():
+    print 'No description set.'
+    print 'Visit %s/edit to set it.' % (cl.GetIssueURL())
+    return 1
+
+  if RunGit(['diff-index', 'HEAD']):
+    print 'Cannot dcommit with a dirty tree.  You must commit locally first.'
+    return 1
+
+  # This rev-list syntax means "show all commits not in my branch that
+  # are in base_branch".
+  upstream_commits = RunGit(['rev-list', '^' + cl.GetBranchRef(),
+                             base_branch]).splitlines()
+  if upstream_commits:
+    print ('Base branch "%s" has %d commits '
+           'not in this branch.' % (base_branch, len(upstream_commits)))
+    print 'Run "git merge %s" before attempting to dcommit.' % base_branch
+    return 1
+
+  if not options.force:
+    # Check the tree status if the tree status URL is set.
+    status = GetTreeStatus()
+    if 'closed' == status:
+      print ('The tree is closed.  Please wait for it to reopen. Use '
+             '"git cl dcommit -f" to commit on a closed tree.')
+      return 1
+    elif 'unknown' == status:
+      print ('Unable to determine tree status.  Please verify manually and '
+             'use "git cl dcommit -f" to commit on a closed tree.')
+
+  description = cl.GetDescription()
+
+  description += "\n\nReview URL: %s" % cl.GetIssueURL()
+  if options.contributor:
+    description += "\nPatch from %s." % options.contributor
+  print 'Description:', repr(description)
+
+  branches = [base_branch, cl.GetBranchRef()]
+  if not options.force:
+    subprocess.call(['git', 'diff', '--stat'] + branches)
+    raw_input("About to commit; enter to confirm.")
+
+  # We want to squash all this branch's commits into one commit with the
+  # proper description.
+  # We do this by doing a "merge --squash" into a new commit branch, then
+  # dcommitting that.
+  MERGE_BRANCH = 'git-cl-commit'
+  # Delete the merge branch if it already exists.
+  if RunGit(['show-ref', '--quiet', '--verify', 'refs/heads/' + MERGE_BRANCH],
+            exit_code=True) == 0:
+    RunGit(['branch', '-D', MERGE_BRANCH])
+  # Stuff our change into the merge branch.
+  RunGit(['checkout', '-q', '-b', MERGE_BRANCH, base_branch])
+  RunGit(['merge', '--squash', cl.GetBranchRef()])
+  RunGit(['commit', '-m', description])
+  # dcommit the merge branch.
+  output = RunGit(['svn', 'dcommit'])
+  # And then swap back to the original branch and clean up.
+  RunGit(['checkout', '-q', cl.GetBranch()])
+  RunGit(['branch', '-D', MERGE_BRANCH])
+  if output.find("Committed r") != -1:
+    print "Closing issue (you may be prompted for your codereview password)..."
+    if cl.has_issue:
+      viewvc_url = settings.GetViewVCUrl()
+      if viewvc_url:
+        revision = re.compile(r".*?\nCommitted r(\d+)",
+                              re.DOTALL).match(output).group(1)
+        cl.description = (cl.description +
+                          "\n\nCommitted: " + viewvc_url + revision)
+      cl.CloseIssue()
+      cl.SetIssue(0)
+
+
+def CmdPatch(args):
+  parser = optparse.OptionParser(usage=('git cl patch [options] '
+                                        '<patch url or issue id>'))
+  parser.add_option('-b', dest='newbranch',
+                    help='create a new branch off trunk for the patch')
+  parser.add_option('-f', action='store_true', dest='force',
+                    help='with -b, clobber any existing branch')
+  parser.add_option('--reject', action='store_true', dest='reject',
+                    help='allow failed patches and spew .rej files')
+  parser.add_option('-n', '--no-commit', action='store_true', dest='nocommit',
+                    help="don't commit after patch applies")
+  (options, args) = parser.parse_args(args)
+  if len(args) != 1:
+    return parser.print_help()
+  issue_arg = args[0]
+
+  if re.match(r'\d+', issue_arg):
+    # The argument is an issue id.  Figure out the URL.
+    issue = issue_arg
+    fetch = "curl --silent http://%s/%s" % (settings.GetServer(), issue)
+    grep = "grep -E -o '/download/issue[0-9]+_[0-9]+.diff'"
+    pipe = subprocess.Popen("%s | %s" % (fetch, grep), shell=True,
+                            stdout=subprocess.PIPE)
+    path = pipe.stdout.read().strip()
+    url = 'http://%s%s' % (settings.GetServer(), path)
+  else:
+    # Assume it's a URL to the patch.
+    match = re.match(r'http://.*?/issue(\d+)_\d+.diff', issue_arg)
+    if match:
+      issue = match.group(1)
+      url = issue_arg
+    else:
+      print "Must pass an issue ID or full URL for 'Download raw patch set'"
+      return 1
+
+  if options.newbranch:
+    if options.force:
+      RunGit(['branch', '-D', options.newbranch], error_ok=True)
+    RunGit(['checkout', '-b', options.newbranch])
+
+  # Switch up to the top-level directory, if necessary, in preparation for
+  # applying the patch.
+  top = RunGit(['rev-parse', '--show-cdup']).strip()
+  if top:
+    os.chdir(top)
+
+  # Construct a pipeline to feed the patch into "git apply".
+  # We use "git apply" to apply the patch instead of "patch" so that we can
+  # pick up file adds.
+  # 1) Fetch the patch.
+  fetch = "curl --silent %s" % url
+  # 2) Munge the patch.
+  # Git patches have a/ at the beginning of source paths.  We strip that out
+  # with a sed script rather than the -p flag to patch so we can feed either
+  # Git or svn-style patches into the same apply command.
+  gitsed = "sed -e 's|^--- a/|--- |; s|^+++ b/|+++ |'"
+  # 3) Apply the patch.
+  # The --index flag means: also insert into the index (so we catch adds).
+  apply_cmd = "git apply --index -p0"
+  if options.reject:
+    apply_cmd += " --reject"
+  subprocess.check_call(' | '.join([fetch, gitsed, apply_cmd]), shell=True)
+
+  # If we had an issue, commit the current state and register the issue.
+  if not options.nocommit:
+    RunGit(['commit', '-m', 'patch from issue %s' % issue])
+    cl = Changelist()
+    cl.SetIssue(issue)
+    print "Committed patch."
+  else:
+    print "Patch applied to index."
+
+def CmdRebase(args):
+  # Provide a wrapper for git svn rebase to help avoid accidental
+  # git svn dcommit.
+  RunGit(['svn', 'rebase'])
+
+def GetTreeStatus():
+  """Fetches the tree status and returns either 'open', 'closed',
+  'unknown' or 'unset'."""
+  url = settings.GetTreeStatusUrl(error_ok=True)
+  if url:
+    status = urllib2.urlopen(url).read().lower()
+    if status.find('closed') != -1:
+      return 'closed'
+    elif status.find('open') != -1:
+      return 'open'
+    return 'unknown'
+
+  return 'unset'
+
+def CmdTreeStatus(args):
+  status = GetTreeStatus()
+  if status == 'unset':
+    print 'You must configure your tree status URL by running "git cl config".'
+  else:
+    print "The tree is %s" % status
+
+def CmdUpstream(args):
+  cl = Changelist()
+  print cl.GetUpstreamBranch()
+
+COMMANDS = [
+  ('config',  'edit configuration for this tree',            CmdConfig),
+  ('status',  'show status of changelists',                  CmdStatus),
+  ('issue',   'show/set current branch\'s issue number',     CmdIssue),
+  ('upload',  'upload the current changelist to codereview', CmdUpload),
+  ('dcommit', 'commit the current changelist via git-svn',   CmdDCommit),
+  ('patch',   'patch in a code review',                      CmdPatch),
+  ('rebase',  'rebase current branch on top of svn repo',    CmdRebase),
+  ('tree',    'show the status of the tree',                 CmdTreeStatus),
+  ('upstream', 'print the name of the upstream branch, if any', CmdUpstream),
+]
+
+
+def Usage(name):
+  print 'usage: %s <command>' % name
+  print 'commands are:'
+  for name, desc, _ in COMMANDS:
+    print '  %-10s %s' % (name, desc)
+  sys.exit(1)
+
+
+def main(argv):
+  if len(argv) < 2:
+    Usage(argv[0])
+
+  command = argv[1]
+  for name, _, func in COMMANDS:
+    if name == command:
+      return func(argv[2:])
+  print 'unknown command: %s' % command
+  Usage(argv[0])
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))

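A note for readers tracing CmdPatch above: given a bare issue id, it shells out to curl and grep to scrape the raw-patch link from the Rietveld issue page. The same step can be sketched in pure Python; this is a minimal illustration, not part of git-cl.py, and GuessPatchUrl is a hypothetical name:

    import re
    import urllib2

    def GuessPatchUrl(server, issue):
      """Sketch: find the 'Download raw patch set' link for an issue.

      Mirrors the curl|grep pipeline in CmdPatch; assumes the issue page
      embeds a /download/issueNNN_NNN.diff link, as the grep expects.
      """
      page = urllib2.urlopen('http://%s/%s' % (server, issue)).read()
      match = re.search(r'/download/issue\d+_\d+\.diff', page)
      if not match:
        raise Exception('No raw patch link found for issue %s' % issue)
      return 'http://%s%s' % (server, match.group(0))
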
+ 184 - 0
git-try.py

@@ -0,0 +1,184 @@
+#!/usr/bin/python
+
+# Repo lives in ~evanm/projects/git-try -- feel free to send patches.
+
+import getpass
+import optparse
+import os
+import subprocess
+import tempfile
+import traceback
+import urllib
+import sys
+
+
+def Backquote(cmd):
+  """Like running `cmd` in a shell script."""
+  return subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0].strip()
+
+
+def GetTryServerConfig():
+  """Returns the dictionary of try server options or None if they
+  cannot be found."""
+  script_path = 'tools/tryserver/tryserver.py'
+  root_dir = Backquote(['git', 'rev-parse', '--show-cdup'])
+  try:
+    script_file = open(os.path.join(root_dir, script_path))
+  except IOError:
+    return None
+  scope = {}
+  try:
+    exec(script_file, scope)
+  except Exception:
+    return None
+  return scope
+
+
+def GetBranchName():
+  """Return name of current git branch."""
+  branch = Backquote(['git', 'symbolic-ref', 'HEAD'])
+  if not branch.startswith('refs/heads/'):
+    raise Exception("Couldn't figure out branch name")
+  branch = branch[len('refs/heads/'):]
+  return branch
+
+
+def GetPatchName():
+  """Construct a name for this patch."""
+  # TODO: perhaps include the hash of the current commit, to distinguish
+  # patches?
+  return GetBranchName()
+
+
+def GetRevision():
+  """Get the latest Subversion revision number."""
+  for line in Backquote(['git', 'svn', 'info']).split('\n'):
+    if line.startswith('Revision:'):
+      return line[len('Revision:'):].strip()
+  raise Exception("Couldn't figure out latest revision")
+
+
+def GetRietveldIssueNumber():
+  return Backquote(['git', 'config',
+                    'branch.%s.rietveldissue' % GetBranchName()])
+
+
+def GetRietveldPatchsetNumber():
+  return Backquote(['git', 'config',
+                    'branch.%s.rietveldpatchset' % GetBranchName()])
+
+
+def GetMungedDiff(branch):
+  """Get the diff we'll send to the try server.  We munge paths to match svn."""
+  # Make the following changes:
+  # - Prepend "src/" to paths as svn is expecting
+  # - In the case of added files, replace /dev/null with the path to the file
+  #   being added.
+  output = []
+  if not branch:
+    # Try to guess the upstream branch.
+    branch = Backquote(['git', 'cl', 'upstream'])
+  diff = subprocess.Popen(['git', 'diff-tree', '-p', '--no-prefix',
+                           branch, 'HEAD'],
+                          stdout=subprocess.PIPE).stdout.readlines()
+  for i in range(len(diff)):
+    line = diff[i]
+    if line.startswith('--- /dev/null'):
+      line = '--- %s' % diff[i+1][4:]
+    elif line.startswith('--- ') or line.startswith('+++ '):
+      line = line[0:4] + 'src/' + line[4:]
+    output.append(line)
+
+  return ''.join(output)
+
+
+def GetEmail():
+  # TODO: check for errors here?
+  return Backquote(['git', 'config', 'user.email'])
+
+
+def TryChange(args):
+  """Put a patch on the try server using SVN."""
+  # TODO: figure out a better way to load trychange
+  script_path = '../depot_tools/release'
+  root_dir = Backquote(['git', 'rev-parse', '--show-cdup'])
+  sys.path.append(os.path.join(root_dir, script_path))
+  import trychange
+  trychange.checkout_root = os.path.abspath(root_dir)
+  trychange.TryChange(args, None, False)
+
+
+def WriteTryDiffHTTP(config, patch_name, diff, options):
+  """Put a patch on the try server."""
+  params = {
+      'user': getpass.getuser(),
+      'name': patch_name,
+      'patch': diff
+  }
+
+  if options.bot:
+    params['bot'] = options.bot
+
+  if options.clobber:
+    params['clobber'] = 'true'
+
+  url = 'http://%s:%s/send_try_patch' % (config['try_server_http_host'],
+                                         config['try_server_http_port'])
+  connection = urllib.urlopen(url, urllib.urlencode(params))
+  response = connection.read()
+  if response != 'OK':
+    print "Error posting to", url
+    print response
+    assert False
+
+
+if __name__ == '__main__':
+  parser = optparse.OptionParser(
+      usage='git try [branch]',
+      description='Upload the current diff of branch...HEAD to the try server.')
+  parser.add_option("-b", "--bot",
+                    help="Force the use of a specific build slave (e.g. mac, "
+                         "win, or linux)")
+  parser.add_option("-c", "--clobber", action="store_true",
+                    help="Make the try run be a clobber build")
+  (options, args) = parser.parse_args(sys.argv)
+
+  branch = None
+  if len(args) > 1:
+    branch = args[1]
+
+  patch_name = GetPatchName()
+  diff = GetMungedDiff(branch)
+
+  # Send directly to try server if we can parse the config, otherwise
+  # upload via SVN.
+  config = GetTryServerConfig()
+  if config is not None:
+    print "Sending %s using HTTP..." % patch_name
+    WriteTryDiffHTTP(config=config, patch_name=patch_name, diff=diff,
+                     options=options)
+  else:
+    print "Sending %s using SVN..." % patch_name
+
+    # Write the diff out to a temporary file
+    diff_file = tempfile.NamedTemporaryFile()
+    diff_file.write(diff)
+    diff_file.flush()
+
+    email = GetEmail()
+    user = email.partition('@')[0]
+    args = [
+        '--use_svn',
+        '--svn_repo', 'svn://svn.chromium.org/chrome-try/try',
+        '-u', user,
+        '-e', email,
+        '-n', patch_name,
+        '-r', GetRevision(),
+        '--diff', diff_file.name,
+    ]
+    if GetRietveldPatchsetNumber():
+      args.extend([
+          '--issue', GetRietveldIssueNumber(),
+          '--patchset', GetRietveldPatchsetNumber(),
+      ])
+    TryChange(args=args)

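The subtle part of git-try.py is the path munging in GetMungedDiff. Here is a standalone sketch of the same per-line rules (MungeDiff is a hypothetical name; the input is the line list produced by `git diff-tree -p --no-prefix`):

    def MungeDiff(diff_lines):
      """Sketch of GetMungedDiff's rewrite rules."""
      output = []
      for i, line in enumerate(diff_lines):
        if line.startswith('--- /dev/null'):
          # Added file: point the old side at the new path, taken from
          # the following '+++' header line.
          line = '--- %s' % diff_lines[i + 1][4:]
        elif line.startswith('--- ') or line.startswith('+++ '):
          # Prepend src/ to the path, as the svn-based try server expects.
          line = line[0:4] + 'src/' + line[4:]
        output.append(line)
      return ''.join(output)

    # MungeDiff(['--- /dev/null\n', '+++ new_file.cc\n'])
    # yields '--- new_file.cc\n+++ src/new_file.cc\n'.
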
+ 9 - 0
hammer

@@ -0,0 +1,9 @@
+#!/bin/sh
+
+# The first expression catches when we're actually in the /src directory.
+# The second expressions strips everything after the last /src occurrence.
+SRC_DIR=`pwd | sed -e '\;/src$;q' -e 's;\(.*/src\)/.*;\1;'`
+SCONS="${SRC_DIR}/third_party/scons/scons.py"
+SITE_SCONS="${SRC_DIR}/site_scons"
+
+exec python "${SCONS}" "--site-dir=${SITE_SCONS}" "$@"

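The sed one-liner in hammer is dense; in words: if the working directory already ends in /src, keep it, otherwise strip everything after the last /src component. An equivalent sketch in Python (FindSrcDir is an illustrative name, not part of the tools):

    import re

    def FindSrcDir(cwd):
      """Mirror of the sed logic: derive the enclosing src directory."""
      if cwd.endswith('/src'):
        return cwd
      # The greedy .* anchors the match at the last /src, like sed does.
      return re.sub(r'(.*/src)/.*', r'\1', cwd)

    # FindSrcDir('/home/me/chrome/src/chrome/browser')
    #   -> '/home/me/chrome/src'
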
+ 23 - 0
hammer.bat

@@ -0,0 +1,23 @@
+@echo off
+
+@rem If scons exists under the current directory, we are already in src.
+if exist "%cd%\third_party\scons" goto srcdir
+
+@rem We're in a submodule directory, look relative to the parent.
+call python "%cd%\..\third_party\scons\scons.py" "--site-dir=..\site_scons" %*
+goto omega
+
+:srcdir
+call python "%cd%\third_party\scons\scons.py" --site-dir=site_scons %*
+goto omega
+
+@rem Per the following page:
+@rem   http://code-bear.com/bearlog/2007/06/01/getting-the-exit-code-from-a-batch-file-that-is-run-from-a-python-program/
+@rem Just calling "exit /b" passes back an exit code, but in a way
+@rem that does NOT get picked up correctly when executing the .bat
+@rem file from the Python subprocess module.  Using "call" as the
+@rem last command in the .bat file makes it work as expected.
+
+:returncode
+exit /b %ERRORLEVEL%
+
+:omega
+call :returncode %ERRORLEVEL%

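Per the exit-code note in hammer.bat, the trailing call :returncode exists because a plain exit /b is not reliably observed when the batch file is launched from Python's subprocess module. A small hedged sketch of how one might check the propagated code from Python on Windows (the hammer.bat path and arguments are illustrative):

    import subprocess

    # cmd /c runs the batch file and returns its exit code; with the
    # 'call :returncode' pattern in place, a scons failure surfaces
    # here as a non-zero return value.
    ret = subprocess.call(['cmd', '/c', 'hammer.bat', '--help'])
    print 'hammer.bat exited with %d' % ret
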
+ 715 - 0
presubmit.py

@@ -0,0 +1,715 @@
+#!/usr/bin/python
+# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Enables directory-specific presubmit checks to run at upload and/or commit.
+"""
+
+__version__ = '1.0'
+
+# TODO(joi) Add caching where appropriate/needed. The API is designed to allow
+# caching (between all different invocations of presubmit scripts for a given
+# change). We should add it as our presubmit scripts start feeling slow.
+
+import cPickle  # Exposed through the API.
+import cStringIO  # Exposed through the API.
+import exceptions
+import fnmatch
+import glob
+import marshal  # Exposed through the API.
+import optparse
+import os  # Somewhat exposed through the API.
+import pickle  # Exposed through the API.
+import re  # Exposed through the API.
+import subprocess  # Exposed through the API.
+import sys  # Parts exposed through API.
+import tempfile  # Exposed through the API.
+import types
+import urllib2  # Exposed through the API.
+
+# Local imports.
+# TODO(joi) Would be cleaner to factor out utils in gcl to separate module, but
+# for now it would only be a couple of functions so hardly worth it.
+import gcl
+import presubmit_canned_checks
+
+
+# Matches key/value (or "tag") lines in changelist descriptions.
+_tag_line_re = re.compile(
+    r'^\s*(?P<key>[A-Z][A-Z_0-9]*)\s*=\s*(?P<value>.*?)\s*$')
+
+
+# Friendly names may be used for certain keys.  All values for key-value pairs
+# in change descriptions (like BUG=123) can be retrieved from a change object
+# directly as if they were attributes, e.g. change.R (or equivalently because
+# we have a friendly name for it, change.Reviewers), change.BUG (or
+# change.BugIDs) and so forth.
+#
+# Add to this mapping as needed/desired.
+SPECIAL_KEYS = {
+  'Reviewers' : 'R',
+  'BugIDs' : 'BUG',
+  'Tested': 'TESTED'
+}
+
+
+class NotImplementedException(Exception):
+  """We're leaving placeholders in a bunch of places to remind us of the
+  design of the API, but we have not implemented all of it yet. Implement as
+  the need arises.
+  """
+  pass
+
+
+def normpath(path):
+  '''Version of os.path.normpath that also changes backward slashes to
+  forward slashes when not running on Windows.
+  '''
+  # This is safe to always do because the Windows version of os.path.normpath
+  # will replace forward slashes with backward slashes.
+  path = path.replace(os.sep, '/')
+  return os.path.normpath(path)
+
+
+class OutputApi(object):
+  """This class (more like a module) gets passed to presubmit scripts so that
+  they can specify various types of results.
+  """
+
+  class PresubmitResult(object):
+    """Base class for result objects."""
+
+    def __init__(self, message, items=None, long_text=''):
+      """
+      message: A short one-line message to indicate errors.
+      items: A list of short strings to indicate where errors occurred.
+      long_text: multi-line text output, e.g. from another tool
+      """
+      self._message = message
+      self._items = []
+      if items:
+        self._items = items
+      self._long_text = long_text.rstrip()
+
+    def _Handle(self, output_stream, input_stream, may_prompt=True):
+      """Writes this result to the output stream.
+
+      Args:
+        output_stream: Where to write the result.
+        input_stream: Stream to read the user's response from when prompting.
+        may_prompt: Whether a ShouldPrompt() result may ask for confirmation.
+
+      Returns:
+        True if execution may continue, False otherwise.
+      """
+      output_stream.write(self._message)
+      output_stream.write('\n')
+      for item in self._items:
+        output_stream.write('  %s\n' % item)
+      if self._long_text:
+        output_stream.write('\n***************\n%s\n***************\n\n' %
+                            self._long_text)
+
+      if self.ShouldPrompt() and may_prompt:
+        output_stream.write('Are you sure you want to continue? (y/N): ')
+        response = input_stream.readline()
+        if response.strip().lower() != 'y':
+          return False
+
+      return not self.IsFatal()
+
+    def IsFatal(self):
+      """An error that is fatal stops gcl upload/commit immediately, i.e.
+      before other presubmit scripts are run.
+      """
+      return False
+
+    def ShouldPrompt(self):
+      """Whether this presubmit result should result in a prompt warning."""
+      return False
+
+  class PresubmitError(PresubmitResult):
+    """A hard presubmit error."""
+    def IsFatal(self):
+      return True
+
+  class PresubmitPromptWarning(PresubmitResult):
+    """A warning that asks the user whether they want to continue."""
+    def ShouldPrompt(self):
+      return True
+
+  class PresubmitNotifyResult(PresubmitResult):
+    """Just print something to the screen -- but it's not even a warning."""
+    pass
+
+  class MailTextResult(PresubmitResult):
+    """A warning that should be included in the review request email."""
+    def __init__(self, *args, **kwargs):
+      raise NotImplementedException()  # TODO(joi) Implement.
+
+
+class InputApi(object):
+  """An instance of this object is passed to presubmit scripts so they can
+  know stuff about the change they're looking at.
+  """
+
+  def __init__(self, change, presubmit_path):
+    """Builds an InputApi object.
+
+    Args:
+      change: A presubmit.GclChange object.
+      presubmit_path: The path to the presubmit script being processed.
+    """
+    self.change = change
+
+    # We expose various modules and functions as attributes of the input_api
+    # so that presubmit scripts don't have to import them.
+    self.basename = os.path.basename
+    self.cPickle = cPickle
+    self.cStringIO = cStringIO
+    self.os_path = os.path
+    self.pickle = pickle
+    self.marshal = marshal
+    self.re = re
+    self.subprocess = subprocess
+    self.tempfile = tempfile
+    self.urllib2 = urllib2
+
+    # InputApi.platform is the platform you're currently running on.
+    self.platform = sys.platform
+
+    # The local path of the currently-being-processed presubmit script.
+    self.current_presubmit_path = presubmit_path
+
+    # We carry the canned checks so presubmit scripts can easily use them.
+    self.canned_checks = presubmit_canned_checks
+
+  def PresubmitLocalPath(self):
+    """Returns the local path of the presubmit script currently being run.
+
+    This is useful if you don't want to hard-code absolute paths in the
+    presubmit script.  For example, it can be used to find another file
+    relative to the PRESUBMIT.py script, so the whole tree can be branched and
+    the presubmit script still works, without editing its content.
+    """
+    return self.current_presubmit_path
+
+  @staticmethod
+  def DepotToLocalPath(depot_path):
+    """Translate a depot path to a local path (relative to client root).
+
+    Args:
+      depot_path: Depot path as a string.
+
+    Returns:
+      The local path of the depot path under the user's current client, or None
+      if the file is not mapped.
+
+      Remember to check for the None case and show an appropriate error!
+    """
+    local_path = gcl.GetSVNFileInfo(depot_path).get('Path')
+    if not local_path:
+      return None
+    else:
+      return local_path
+
+  @staticmethod
+  def LocalToDepotPath(local_path):
+    """Translate a local path to a depot path.
+
+    Args:
+      local_path: Local path (relative to current directory, or absolute)
+        as a string.
+
+    Returns:
+      The depot path (SVN URL) of the file if mapped, otherwise None.
+    """
+    depot_path = gcl.GetSVNFileInfo(local_path).get('URL')
+    if not depot_path:
+      return None
+    else:
+      return depot_path
+
+  @staticmethod
+  def FilterTextFiles(affected_files, include_deletes=True):
+    """Filters out all except text files and optionally also filters out
+    deleted files.
+
+    Args:
+      affected_files: List of AffectedFiles objects.
+      include_deletes: If false, deleted files will be filtered out.
+
+    Returns:
+      Filtered list of AffectedFiles objects.
+    """
+    output_files = []
+    for af in affected_files:
+      if include_deletes or af.Action() != 'D':
+        path = af.AbsoluteLocalPath()
+        mime_type = gcl.GetSVNFileProperty(path, 'svn:mime-type')
+        if not mime_type or mime_type.startswith('text/'):
+          output_files.append(af)
+    return output_files
+
+  def AffectedFiles(self, include_dirs=False, include_deletes=True):
+    """Same as input_api.change.AffectedFiles() except only lists files
+    (and optionally directories) in the same directory as the current presubmit
+    script, or subdirectories thereof.
+    """
+    output_files = []
+    dir_with_slash = normpath(
+        "%s/" % os.path.dirname(self.current_presubmit_path))
+    # A presubmit script at the repository root normalizes to a bare
+    # separator here; treat it as matching every affected file.
+    if len(dir_with_slash) == 1:
+      dir_with_slash = ''
+    for af in self.change.AffectedFiles(include_dirs, include_deletes):
+      af_path = normpath(af.LocalPath())
+      if af_path.startswith(dir_with_slash):
+        output_files.append(af)
+    return output_files
+
+  def LocalPaths(self, include_dirs=False):
+    """Returns local paths of input_api.AffectedFiles()."""
+    return [af.LocalPath() for af in self.AffectedFiles(include_dirs)]
+
+  def AbsoluteLocalPaths(self, include_dirs=False):
+    """Returns absolute local paths of input_api.AffectedFiles()."""
+    return [af.AbsoluteLocalPath() for af in self.AffectedFiles(include_dirs)]
+
+  def ServerPaths(self, include_dirs=False):
+    """Returns server paths of input_api.AffectedFiles()."""
+    return [af.ServerPath() for af in self.AffectedFiles(include_dirs)]
+
+  def AffectedTextFiles(self, include_deletes=True):
+    """Same as input_api.change.AffectedTextFiles() except only lists files
+    in the same directory as the current presubmit script, or subdirectories
+    thereof.
+
+    Warning: This function retrieves the svn property on each file so it can be
+    slow for large change lists.
+    """
+    return InputApi.FilterTextFiles(self.AffectedFiles(include_dirs=False),
+                                    include_deletes)
+
+  def RightHandSideLines(self):
+    """An iterator over all text lines in "new" version of changed files.
+
+    Only lists lines from new or modified text files in the change that are
+    contained by the directory of the currently executing presubmit script.
+
+    This is useful for doing line-by-line regex checks, like checking for
+    trailing whitespace.
+
+    Yields:
+      a 3 tuple:
+        the AffectedFile instance of the current file;
+        integer line number (1-based); and
+        the contents of the line as a string.
+    """
+    return InputApi._RightHandSideLinesImpl(
+        self.AffectedTextFiles(include_deletes=False))
+
+  @staticmethod
+  def _RightHandSideLinesImpl(affected_files):
+    """Implements RightHandSideLines for InputApi and GclChange."""
+    for af in affected_files:
+      lines = af.NewContents()
+      line_number = 0
+      for line in lines:
+        line_number += 1
+        yield (af, line_number, line)
+
+
+class AffectedFile(object):
+  """Representation of a file in a change."""
+
+  def __init__(self, path, action, repository_root=''):
+    self.path = path
+    self.action = action.strip()
+    self.repository_root = repository_root
+
+  def ServerPath(self):
+    """Returns a path string that identifies the file in the SCM system.
+
+    Returns the empty string if the file does not exist in SCM.
+    """
+    return gcl.GetSVNFileInfo(self.AbsoluteLocalPath()).get('URL', '')
+
+  def LocalPath(self):
+    """Returns the path of this file on the local disk relative to client root.
+    """
+    return normpath(self.path)
+
+  def AbsoluteLocalPath(self):
+    """Returns the absolute path of this file on the local disk.
+    """
+    return normpath(os.path.join(self.repository_root, self.LocalPath()))
+
+  def IsDirectory(self):
+    """Returns true if this object is a directory."""
+    if os.path.exists(self.path):
+      # Retrieve directly from the file system; it is much faster than querying
+      # subversion, especially on Windows.
+      return os.path.isdir(self.path)
+    else:
+      return gcl.GetSVNFileInfo(self.path).get('Node Kind') == 'directory'
+
+  def SvnProperty(self, property_name):
+    """Returns the specified SVN property of this file, or the empty string
+    if no such property.
+    """
+    return gcl.GetSVNFileProperty(self.AbsoluteLocalPath(), property_name)
+
+  def Action(self):
+    """Returns the action on this opened file, e.g. A, M, D, etc."""
+    return self.action
+
+  def NewContents(self):
+    """Returns an iterator over the lines in the new version of file.
+
+    The new version is the file in the user's workspace, i.e. the "right hand
+    side".
+
+    Contents will be empty if the file is a directory or does not exist.
+    """
+    if self.IsDirectory():
+      return []
+    else:
+      return gcl.ReadFile(self.AbsoluteLocalPath()).splitlines()
+
+  def OldContents(self):
+    """Returns an iterator over the lines in the old version of file.
+
+    The old version is the file in depot, i.e. the "left hand side".
+    """
+    raise NotImplementedError()  # Implement when needed
+
+  def OldFileTempPath(self):
+    """Returns the path on local disk where the old contents resides.
+
+    The old version is the file in depot, i.e. the "left hand side".
+    This is a read-only cached copy of the old contents. *DO NOT* try to
+    modify this file.
+    """
+    raise NotImplementedError()  # Implement if/when needed.
+
+
+class GclChange(object):
+  """A gcl change. See gcl.ChangeInfo for more info."""
+
+  def __init__(self, change_info, repository_root=''):
+    self.name = change_info.name
+    self.full_description = change_info.description
+    self.repository_root = repository_root
+
+    # From the description text, build up a dictionary of key/value pairs
+    # plus the description minus all key/value or "tag" lines.
+    self.description_without_tags = []
+    self.tags = {}
+    for line in change_info.description.splitlines():
+      m = _tag_line_re.match(line)
+      if m:
+        self.tags[m.group('key')] = m.group('value')
+      else:
+        self.description_without_tags.append(line)
+
+    # Change back to text and remove whitespace at end.
+    self.description_without_tags = '\n'.join(self.description_without_tags)
+    self.description_without_tags = self.description_without_tags.rstrip()
+
+    self.affected_files = [AffectedFile(info[1], info[0], repository_root) for
+                           info in change_info.files]
+
+  def Change(self):
+    """Returns the change name."""
+    return self.name
+
+  def Changelist(self):
+    """Synonym for Change()."""
+    return self.Change()
+
+  def DescriptionText(self):
+    """Returns the user-entered changelist description, minus tags.
+
+    Any line in the user-provided description starting with e.g. "FOO="
+    (whitespace permitted before and around) is considered a tag line.  Such
+    lines are stripped out of the description this function returns.
+    """
+    return self.description_without_tags
+
+  def FullDescriptionText(self):
+    """Returns the complete changelist description including tags."""
+    return self.full_description
+
+  def RepositoryRoot(self):
+    """Returns the repository root for this change, as an absolute path."""
+    return self.repository_root
+
+  def __getattr__(self, attr):
+    """Return keys directly as attributes on the object.
+
+    You may use a friendly name (from SPECIAL_KEYS) or the actual name of
+    the key.
+    """
+    if attr in SPECIAL_KEYS:
+      key = SPECIAL_KEYS[attr]
+      if key in self.tags:
+        return self.tags[key]
+    if attr in self.tags:
+      return self.tags[attr]
+
+  def AffectedFiles(self, include_dirs=False, include_deletes=True):
+    """Returns a list of AffectedFile instances for all files in the change.
+
+    Args:
+      include_dirs: True to include directories in the list.
+      include_deletes: If false, deleted files will be filtered out.
+
+    Returns:
+      [AffectedFile(path, action), AffectedFile(path, action)]
+    """
+    if include_dirs:
+      affected = self.affected_files
+    else:
+      affected = filter(lambda x: not x.IsDirectory(), self.affected_files)
+
+    if include_deletes:
+      return affected
+    else:
+      return filter(lambda x: x.Action() != 'D', affected)
+
+  def AffectedTextFiles(self, include_deletes=True):
+    """Return a list of the text files in a change.
+
+    It's common to want to iterate over only the text files.
+
+    Args:
+      include_deletes: Controls whether to return files with "delete" actions,
+      which commonly aren't relevant to presubmit scripts.
+    """
+    return InputApi.FilterTextFiles(self.AffectedFiles(include_dirs=False),
+                                    include_deletes)
+
+  def LocalPaths(self, include_dirs=False):
+    """Convenience function."""
+    return [af.LocalPath() for af in self.AffectedFiles(include_dirs)]
+
+  def AbsoluteLocalPaths(self, include_dirs=False):
+    """Convenience function."""
+    return [af.AbsoluteLocalPath() for af in self.AffectedFiles(include_dirs)]
+
+  def ServerPaths(self, include_dirs=False):
+    """Convenience function."""
+    return [af.ServerPath() for af in self.AffectedFiles(include_dirs)]
+
+  def RightHandSideLines(self):
+    """An iterator over all text lines in "new" version of changed files.
+
+    Lists lines from new or modified text files in the change.
+
+    This is useful for doing line-by-line regex checks, like checking for
+    trailing whitespace.
+
+    Yields:
+      a 3 tuple:
+        the AffectedFile instance of the current file;
+        integer line number (1-based); and
+        the contents of the line as a string.
+    """
+    return InputApi._RightHandSideLinesImpl(
+        self.AffectedTextFiles(include_deletes=False))
+
+
+def ListRelevantPresubmitFiles(files):
+  """Finds all presubmit files that apply to a given set of source files.
+
+  Args:
+    files: An iterable container containing file paths.
+
+  Return:
+    ['foo/blat/PRESUBMIT.py', 'mat/gat/PRESUBMIT.py']
+  """
+  checked_dirs = {}  # Keys are directory paths, values are ignored.
+  source_dirs = [os.path.dirname(f) for f in files]
+  presubmit_files = []
+  for directory in source_dirs:
+    while True:
+      if directory in checked_dirs:
+        break  # We've already walked up from this directory.
+
+      test_path = os.path.join(directory, 'PRESUBMIT.py')
+      if os.path.isfile(test_path):
+        presubmit_files.append(normpath(test_path))
+
+      checked_dirs[directory] = ''
+      if directory in ['', '.']:
+        break
+      else:
+        directory = os.path.dirname(directory)
+  return presubmit_files
+
+
+class PresubmitExecuter(object):
+  """Executes PRESUBMIT.py scripts for a single change."""
+
+  def __init__(self, change_info, committing):
+    """
+    Args:
+      change_info: The ChangeInfo object for the change.
+      committing: True if 'gcl commit' is running, False if 'gcl upload' is.
+    """
+    self.change = GclChange(change_info, gcl.GetRepositoryRoot())
+    self.committing = committing
+
+  def ExecPresubmitScript(self, script_text, presubmit_path):
+    """Executes a single presubmit script.
+
+    Args:
+      script_text: The text of the presubmit script.
+      presubmit_path: The path to the presubmit file (this will be reported via
+        input_api.PresubmitLocalPath()).
+
+    Return:
+      A list of result objects, empty if no problems.
+    """
+    input_api = InputApi(self.change, presubmit_path)
+    context = {}
+    exec script_text in context
+
+    # These function names must change if we make substantial changes to
+    # the presubmit API that are not backwards compatible.
+    if self.committing:
+      function_name = 'CheckChangeOnCommit'
+    else:
+      function_name = 'CheckChangeOnUpload'
+    if function_name in context:
+      context['__args'] = (input_api, OutputApi())
+      result = eval(function_name + '(*__args)', context)
+      if not (isinstance(result, types.TupleType) or
+              isinstance(result, types.ListType)):
+        raise exceptions.RuntimeError(
+          'Presubmit functions must return a tuple or list')
+      for item in result:
+        if not isinstance(item, OutputApi.PresubmitResult):
+          raise exceptions.RuntimeError(
+            'All presubmit results must be of types derived from '
+            'output_api.PresubmitResult')
+    else:
+      result = ()  # no error since the script doesn't care about current event.
+
+    return result
+
+
+def DoPresubmitChecks(change_info,
+                      committing,
+                      verbose,
+                      output_stream,
+                      input_stream,
+                      default_presubmit):
+  """Runs all presubmit checks that apply to the files in the change.
+
+  This finds all PRESUBMIT.py files in directories enclosing the files in the
+  change (up to the repository root) and calls the relevant entrypoint function
+  depending on whether the change is being committed or uploaded.
+
+  Prints errors, warnings and notifications.  Prompts the user for warnings
+  when needed.
+
+  Args:
+    change_info: The ChangeInfo object for the change.
+    committing: True if 'gcl commit' is running, False if 'gcl upload' is.
+    verbose: Prints debug info.
+    output_stream: A stream to write output from presubmit tests to.
+    input_stream: A stream to read input from the user.
+    default_presubmit: A default presubmit script to execute in any case.
+
+  Return:
+    True if execution can continue, False if not.
+  """
+  presubmit_files = ListRelevantPresubmitFiles(change_info.FileList())
+  if not presubmit_files and verbose:
+    print "Warning: no PRESUBMIT.py found."
+  results = []
+  executer = PresubmitExecuter(change_info, committing)
+  if default_presubmit:
+    if verbose:
+      print "Running default presubmit script"
+    results += executer.ExecPresubmitScript(default_presubmit, 'PRESUBMIT.py')
+  for filename in presubmit_files:
+    if verbose:
+      print "Running %s" % filename
+    presubmit_script = gcl.ReadFile(filename)
+    results += executer.ExecPresubmitScript(presubmit_script, filename)
+
+  errors = []
+  notifications = []
+  warnings = []
+  for result in results:
+    if not result.IsFatal() and not result.ShouldPrompt():
+      notifications.append(result)
+    elif result.ShouldPrompt():
+      warnings.append(result)
+    else:
+      errors.append(result)
+
+  error_count = 0
+  for name, items in (('Messages', notifications),
+                      ('Warnings', warnings),
+                      ('ERRORS', errors)):
+    if items:
+      output_stream.write('\n** Presubmit %s **\n\n' % name)
+      for item in items:
+        if not item._Handle(output_stream, input_stream,
+                            may_prompt=False):
+          error_count += 1
+        output_stream.write('\n')
+  if not errors and warnings:
+    output_stream.write(
+      'There were presubmit warnings. Sure you want to continue? (y/N): ')
+    response = input_stream.readline()
+    if response.strip().lower() != 'y':
+      error_count += 1
+  return (error_count == 0)
+
+
+def ScanSubDirs(mask, recursive):
+  if not recursive:
+    return [x for x in glob.glob(mask) if '.svn' not in x]
+  else:
+    results = []
+    for root, dirs, files in os.walk('.'):
+      if '.svn' in dirs:
+        dirs.remove('.svn')
+      for name in files:
+        if fnmatch.fnmatch(name, mask):
+          results.append(os.path.join(root, name))
+    return results
+
+
+def ParseFiles(args, recursive):
+  files = []
+  for arg in args:
+    files.extend([('M', file) for file in ScanSubDirs(arg, recursive)])
+  return files
+
+
+def Main(argv):
+  parser = optparse.OptionParser(usage="%prog [options]",
+                                 version="%prog " + str(__version__))
+  parser.add_option("-c", "--commit", action="store_true",
+                   help="Use commit instead of upload checks")
+  parser.add_option("-r", "--recursive", action="store_true",
+                   help="Act recursively")
+  parser.add_option("-v", "--verbose", action="store_true",
+                   help="Verbose output")
+  options, args = parser.parse_args(argv[1:])
+  files = ParseFiles(args, options.recursive)
+  if options.verbose:
+    print "Found %d files." % len(files)
+  return not DoPresubmitChecks(gcl.ChangeInfo(name='temp', files=files),
+                               options.commit,
+                               options.verbose,
+                               sys.stdout,
+                               sys.stdin,
+                               default_presubmit=None)
+
+
+if __name__ == '__main__':
+  sys.exit(Main(sys.argv))

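To make the presubmit API concrete, here is a minimal hypothetical PRESUBMIT.py that a directory could carry. It uses only names defined by presubmit.py and presubmit_canned_checks.py; the trailing-whitespace rule is an example policy, not an actual Chromium one:

    def CheckChangeOnUpload(input_api, output_api):
      results = []
      # Canned checks are exposed on input_api, so scripts need no imports.
      results.extend(input_api.canned_checks.CheckChangeHasNoTabs(
          input_api, output_api))
      # A local line-by-line rule over the new ("right hand side") lines.
      bad = []
      for af, line_num, line in input_api.RightHandSideLines():
        if line != line.rstrip():
          bad.append('%s, line %s' % (af.LocalPath(), line_num))
      if bad:
        results.append(output_api.PresubmitPromptWarning(
            'Trailing whitespace found.', items=bad))
      return results

    def CheckChangeOnCommit(input_api, output_api):
      return CheckChangeOnUpload(input_api, output_api)
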
+ 102 - 0
presubmit_canned_checks.py

@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Generic presubmit checks that can be reused by other presubmit checks."""
+
+
+def CheckChangeHasTestedField(input_api, output_api):
+  """Requires that the changelist have a TESTED= field."""
+  if input_api.change.Tested:
+    return []
+  else:
+    return [output_api.PresubmitError("Changelist must have a TESTED= field.")]
+
+
+def CheckChangeHasQaField(input_api, output_api):
+  """Requires that the changelist have a QA= field."""
+  if input_api.change.QA:
+    return []
+  else:
+    return [output_api.PresubmitError("Changelist must have a QA= field.")]
+
+
+def CheckDoNotSubmitInDescription(input_api, output_api):
+  """Checks that the user didn't add 'DO NOT ' + 'SUBMIT' to the CL description.
+  """
+  keyword = 'DO NOT ' + 'SUBMIT'
+  if keyword in input_api.change.DescriptionText():
+    return [output_api.PresubmitError(
+        keyword + " is present in the changelist description.")]
+  else:
+    return []
+
+
+def CheckDoNotSubmitInFiles(input_api, output_api):
+  """Checks that the user didn't add 'DO NOT ' + 'SUBMIT' to any files."""
+  keyword = 'DO NOT ' + 'SUBMIT'
+  for f, line_num, line in input_api.RightHandSideLines():
+    if keyword in line:
+      text = 'Found ' + keyword + ' in %s, line %s' % (f.LocalPath(), line_num)
+      return [output_api.PresubmitError(text)]
+  return []
+
+
+def CheckDoNotSubmit(input_api, output_api):
+  return (
+      CheckDoNotSubmitInDescription(input_api, output_api) +
+      CheckDoNotSubmitInFiles(input_api, output_api)
+      )
+
+
+def CheckChangeHasNoTabs(input_api, output_api):
+  """Checks that there are no tab characters in any of the text files to be
+  submitted.
+  """
+  for f, line_num, line in input_api.RightHandSideLines():
+    if '\t' in line:
+      return [output_api.PresubmitError(
+          "Found a tab character in %s, line %s" %
+          (f.LocalPath(), line_num))]
+  return []
+
+
+def CheckLongLines(input_api, output_api, maxlen=80):
+  """Checks that there aren't any lines longer than maxlen characters in any of
+  the text files to be submitted.
+  """
+  basename = input_api.basename
+
+  bad = []
+  for f, line_num, line in input_api.RightHandSideLines():
+    if line.endswith('\n'):
+      line = line[:-1]
+    if len(line) > maxlen:
+      bad.append(
+          '%s, line %s, %s chars' %
+          (basename(f.LocalPath()), line_num, len(line)))
+      if len(bad) == 5:  # Just show the first 5 errors.
+        break
+
+  if bad:
+    msg = "Found lines longer than %s characters (first 5 shown)." % maxlen
+    return [output_api.PresubmitPromptWarning(msg, items=bad)]
+  else:
+    return []
+
+
+def CheckTreeIsOpen(input_api, output_api, url, closed):
+  """Checks that an url's content doesn't match a regexp that would mean that
+  """Checks that a URL's content doesn't match a regexp that would mean
+  the tree is closed."""
+    connection = input_api.urllib2.urlopen(url)
+    status = connection.read()
+    connection.close()
+    if input_api.re.match(closed, status):
+      long_text = status + '\n' + url
+      return [output_api.PresubmitError("The tree is closed.",
+                                        long_text=long_text)]
+  except IOError:
+    pass
+  return []

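CheckTreeIsOpen is the one canned check that takes extra arguments. A hedged sketch of wiring it into a commit-time check from a PRESUBMIT.py (the URL and regexp below are placeholders, not real project settings):

    def CheckChangeOnCommit(input_api, output_api):
      # Refuse the commit when the status page matches the 'closed' regexp.
      return input_api.canned_checks.CheckTreeIsOpen(
          input_api, output_api,
          url='http://tree-status.example.com/status',  # placeholder URL
          closed='.*closed.*')
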
+ 8 - 0
profile.xml

@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
+<Profile FormatVersion="1">
+  <Tools>
+    <Tool Filename="python" AllowIntercept="true" />
+    <Tool Filename="cl" AllowRemote="true" VCCompiler="true" />
+    <Tool Filename="link" AllowRemote="false" />
+  </Tools>
+</Profile>

+ 5 - 0
revert

@@ -0,0 +1,5 @@
+#!/bin/sh
+
+base_dir=$(dirname "$0")
+
+exec python "$base_dir/revert.py" "$@"

+ 1 - 0
revert.bat

@@ -0,0 +1 @@
+@python "%~dp0revert.py" %*

+ 284 - 0
revert.py

@@ -0,0 +1,284 @@
+#!/usr/bin/python
+# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Tool to quickly revert a change.
+
+import exceptions
+import optparse
+import os
+import sys
+import xml.dom.minidom
+import xml.parsers.expat
+
+import gcl
+import gclient
+
+
+class ModifiedFile(exceptions.Exception):
+  pass
+
+
+class NoModifiedFile(exceptions.Exception):
+  pass
+
+
+class NoBlameList(exceptions.Exception):
+  pass
+
+
+class OutsideOfCheckout(exceptions.Exception):
+  pass
+
+
+def getTexts(nodelist):
+  """Return a list of texts in the children of a list of DOM nodes."""
+  rc = []
+  for node in nodelist:
+    if node.nodeType == node.TEXT_NODE:
+      rc.append(node.data)
+    else:
+      rc.extend(getTexts(node.childNodes))
+  return rc
+
+
+def RunShellXML(command, print_output=False, keys=None):
+  output = gcl.RunShell(command, print_output)
+  try:
+    dom = xml.dom.minidom.parseString(output)
+    if not keys:
+      return dom
+    result = {}
+    for key in keys:
+      result[key] = getTexts(dom.getElementsByTagName(key))
+  except xml.parsers.expat.ExpatError:
+    print "Failed to parse output:\n%s" % output
+    raise
+  return result
+
+
+def UniqueFast(items):
+  """Returns the unique items of |items|, sorted."""
+  return sorted(set(items))
+
+
+def GetRepoBase():
+  """Returns the repository base of the root local checkout."""
+  xml_data = RunShellXML(['svn', 'info', '.', '--xml'], keys=['root', 'url'])
+  root = xml_data['root'][0]
+  url = xml_data['url'][0]
+  if not root or not url:
+    raise exceptions.Exception("I'm confused by your checkout")
+  if not url.startswith(root):
+    raise exceptions.Exception("I'm confused by your checkout", url, root)
+  return url[len(root):] + '/'
+
+
+def Revert(revisions, force=False, commit=True, send_email=True, message=None,
+           reviewers=None):
+  """Reverts many revisions in one change list.
+
+  If force is True, local modifications are overridden.
+  If commit is True, a commit is done after the revert.
+  If send_email is True, a review email is sent.
+  If message is set, it is appended to the change description.
+  reviewers overrides the blamed committers as recipients of the review
+  email."""
+
+  # Use the oldest revision as the primary revision.
+  changename = "revert%d" % revisions[len(revisions)-1]
+  if not force and os.path.exists(gcl.GetChangelistInfoFile(changename)):
+    print "Error: change %s already exists." % changename
+    return 1
+
+  # Move to the repository root and make the revision numbers sorted in
+  # decreasing order.
+  os.chdir(gcl.GetRepositoryRoot())
+  revisions.sort(reverse=True)
+  revisions_string = ",".join([str(rev) for rev in revisions])
+  revisions_string_rev = ",".join([str(-rev) for rev in revisions])
+
+  repo_base = GetRepoBase()
+  files = []
+  blames = []
+  # Get all the modified files by the revision. We'll use this list to optimize
+  # the svn merge.
+  for revision in revisions:
+    log = RunShellXML(["svn", "log", "-r", str(revision), "-v", "--xml"],
+                        keys=['path', 'author'])
+    for file in log['path']:
+      # Strip the repository base, e.g. /trunk/src/ (repo_base includes
+      # the trailing slash).
+      if not file.startswith(repo_base):
+        raise OutsideOfCheckout(file)
+      files.append(file[len(repo_base):])
+    blames.extend(log['author'])
+
+  # On Windows, fix the slashes once the url part has been removed;
+  # gcl expects native path separators.
+  if sys.platform == 'win32':
+    files = [file.replace('/', os.sep) for file in files]
+
+  # Keep unique.
+  files = UniqueFast(files)
+  blames = UniqueFast(blames)
+  if not reviewers:
+    reviewers = blames
+  else:
+    reviewers = UniqueFast(reviewers)
+
+  # Make sure there's something to revert.
+  if not files:
+    raise NoModifiedFile
+  if not reviewers:
+    raise NoBlameList
+
+  if blames:
+    print "Blaming %s\n" % ",".join(blames)
+  if reviewers != blames:
+    print "Emailing %s\n" % ",".join(reviewers)
+  print "These files were modified in %s:" % revisions_string
+  print "\n".join(files)
+  print ""
+
+  # Make sure these files are unmodified with svn status.
+  status = gcl.RunShell(["svn", "status"] + files)
+  if status:
+    if force:
+      # TODO(maruel): Use the tool to correctly revert '?' files.
+      gcl.RunShell(["svn", "revert"] + files)
+    else:
+      raise ModifiedFile(status)
+  # svn up on each of these files
+  gcl.RunShell(["svn", "up"] + files)
+
+  files_status = {}
+  # Extract the first-level subpaths. Subversion merge performance seems to
+  # degrade exponentially with the size of the tree it operates on, so
+  # working from the repository root is too slow; merge per subdirectory
+  # instead.
+  roots = UniqueFast([file.split(os.sep)[0] for file in files])
+  for root in roots:
+    # Is it a subdirectory or a file?
+    is_root_subdir = os.path.isdir(root)
+    need_to_update = False
+    if is_root_subdir:
+      os.chdir(root)
+      file_list = []
+      # List the file directly since it is faster when there is only one file.
+      for file in files:
+        if file.startswith(root):
+          file_list.append(file[len(root)+1:])
+      if len(file_list) > 1:
+        # Listing multiple files is not supported by svn merge.
+        file_list = ['.']
+        need_to_update = True
+    else:
+      # Oops, root was in fact a file in the root directory.
+      file_list = [root]
+      root = "."
+
+    print "Reverting %s in %s/" % (revisions_string, root)
+    if need_to_update:
+      # Make sure '.' revision is high enough otherwise merge will be
+      # unhappy.
+      retcode = gcl.RunShellWithReturnCode(['svn', 'up', '.', '-N'])[1]
+      if retcode:
+        print 'svn up . -N failed in %s/.' % root
+        return retcode
+
+    command = ["svn", "merge", "-c", revisions_string_rev]
+    command.extend(file_list)
+    (output, retcode) = gcl.RunShellWithReturnCode(command, print_output=True)
+    if retcode:
+      print "'%s' failed:" % command
+      return retcode
+
+    # Grab the status
+    lines = output.split('\n')
+    for line in lines:
+      if line.startswith('---'):
+        continue
+      if line.startswith('Skipped'):
+        print ""
+        raise ModifiedFile(line[9:-1])
+      # Update the status.
+      status = line[:5] + '  '
+      file = line[5:]
+      if is_root_subdir:
+        files_status[root + os.sep + file] = status
+      else:
+        files_status[file] = status
+
+    if is_root_subdir:
+      os.chdir('..')
+
+  # Transform files_status from a dictionary to a list of tuple.
+  files_status = [(files_status[file], file) for file in files]
+
+  description = "Reverting %s." % revisions_string
+  if message:
+    description += "\n\n"
+    description += message
+  # Don't use gcl.Change() since it prompts the user for info.
+  change_info = gcl.ChangeInfo(name=changename, issue='',
+                               description=description, files=files_status)
+  change_info.Save()
+
+  upload_args = ['-r', ",".join(reviewers)]
+  if send_email:
+    upload_args.append('--send_mail')
+  if commit:
+    upload_args.append('--no_try')
+  gcl.UploadCL(change_info, upload_args)
+
+  retcode = 0
+  if commit:
+    gcl.Commit(change_info, ['--force'])
+    # TODO(maruel): gclient sync (to leave the local checkout in a usable
+    # state).
+    retcode = gclient.Main(["gclient.py", "sync"])
+  return retcode
+
+
+def Main(argv):
+  usage = (
+"""%prog [options] [revision numbers to revert]
+Revert a set of revisions, send the review to Rietveld, send a review email,
+and optionally commit the revert.""")
+
+  parser = optparse.OptionParser(usage=usage)
+  parser.add_option("-c", "--commit", default=False, action="store_true",
+                    help="Commits right away.")
+  parser.add_option("-f", "--force", default=False, action="store_true",
+                    help="Forces the revert even if a file is already "
+                         "modified locally; local modifications are "
+                         "discarded.")
+  parser.add_option("-n", "--no_email", default=False, action="store_true",
+                    help="Don't send a review email.")
+  parser.add_option("-m", "--message", default=None,
+                    help="Additional change description message.")
+  parser.add_option("-r", "--reviewers", action="append",
+                    help="Reviewers to send the email to. By default, the "
+                         "list of committers is used.")
+  options, args = parser.parse_args(argv)
+  revisions = []
+  try:
+    for item in args[1:]:
+      revisions.append(int(item))
+  except ValueError:
+    parser.error("You need to pass revision numbers.")
+  if not revisions:
+    parser.error("You need to pass revision numbers.")
+  retcode = 1
+  try:
+    if not os.path.exists(gcl.GetInfoDir()):
+      os.mkdir(gcl.GetInfoDir())
+    retcode = Revert(revisions, options.force, options.commit,
+                     not options.no_email, options.message, options.reviewers)
+  except NoBlameList:
+    print "Error: no one to blame."
+  except NoModifiedFile:
+    print "Error: no files to revert."
+  except ModifiedFile, e:
+    print "You need to revert these files since they were already modified:"
+    print "".join(e.args)
+    print "You can use the --force flag to revert the files."
+  except OutsideOfCheckout, e:
+    print "Your repository doesn't contain ", str(e)
+
+  return retcode
+
+
+if __name__ == "__main__":
+  sys.exit(Main(sys.argv))

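To make the merge invocation in Revert() concrete, here is a short worked example of how the revision strings are assembled (plain string manipulation mirroring the code above):

    revisions = [100, 102]
    revisions.sort(reverse=True)  # [102, 100]
    revisions_string = ",".join([str(rev) for rev in revisions])
    revisions_string_rev = ",".join([str(-rev) for rev in revisions])
    # revisions_string     == '102,100'
    # revisions_string_rev == '-102,-100'
    # Each file group is then reverted with:
    #   svn merge -c -102,-100 <paths>
    # i.e. the changes are merged backwards, newest revision first.
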
+ 41 - 0
tests/abandon.sh

@@ -0,0 +1,41 @@
+#!/bin/bash
+
+# Check that abandoning a branch also abandons its issue.
+
+set -e
+
+. ./test-lib.sh
+
+setup_initsvn
+setup_gitsvn
+
+(
+  set -e
+  cd git-svn
+  git config rietveld.server localhost:8080
+
+  # Create a branch and give it an issue.
+  git checkout -q -b abandoned
+  echo "some work done on a branch" >> test
+  git add test; git commit -q -m "branch work"
+  export EDITOR=/bin/true
+  test_expect_success "upload succeeds" \
+    "$GIT_CL upload -m test master... | grep -q 'Issue created'"
+
+  # Switch back to master, delete the branch.
+  git checkout master
+  git branch -D abandoned
+
+  # Verify that "status" doesn't know about it anymore.
+  # The "exit" trickiness is inverting the exit status of grep.
+  test_expect_success "git-cl status dropped abandoned branch" \
+    "$GIT_CL status | grep -q abandoned && exit 1 || exit 0"
+)
+
+SUCCESS=$?
+
+cleanup
+
+if [ $SUCCESS == 0 ]; then
+  echo PASS
+fi

+ 62 - 0
tests/basic.sh

@@ -0,0 +1,62 @@
+#!/bin/bash
+
+set -e
+
+. ./test-lib.sh
+
+setup_initsvn
+setup_gitsvn
+
+(
+  set -e
+  cd git-svn
+  git checkout -q -b work
+  echo "some work done on a branch" >> test
+  git add test; git commit -q -m "branch work"
+  echo "some other work done on a branch" >> test
+  git add test; git commit -q -m "branch work"
+
+  test_expect_success "git-cl upload wants a server" \
+    "$GIT_CL upload 2>&1 | grep -q 'You must configure'"
+
+  git config rietveld.server localhost:8080
+
+  test_expect_success "git-cl status has no issue" \
+    "$GIT_CL status | grep -q 'no issue'"
+
+  # Prevent the editor from coming up when you upload.
+  export EDITOR=/bin/true
+  test_expect_success "upload succeeds (needs a server running on localhost)" \
+    "$GIT_CL upload -m test master... | grep -q 'Issue created'"
+
+  test_expect_success "git-cl status now knows the issue" \
+    "$GIT_CL status | grep -q 'Issue number'"
+
+  # Push a description to this URL.
+  URL=$($GIT_CL status | sed -ne '/Issue number/s/[^(]*(\(.*\))/\1/p')
+  curl --cookie dev_appserver_login="test@example.com:False" \
+       --data-urlencode subject="test" \
+       --data-urlencode description="foo-quux" \
+       $URL/edit
+
+  test_expect_success "git-cl dcommits ok" \
+    "$GIT_CL dcommit -f"
+
+  git checkout -q master
+  git svn -q rebase >/dev/null 2>&1
+  test_expect_success "dcommitted code has proper description" \
+      "git show | grep -q 'foo-quux'"
+
+  test_expect_success "issue no longer has a branch" \
+      "git cl status | grep -q 'work: None'"
+
+  test_expect_success "upstream svn has our commit" \
+      "svn log $REPO_URL 2>/dev/null | grep -q 'foo-quux'"
+)
+SUCCESS=$?
+
+cleanup
+
+if [ $SUCCESS == 0 ]; then
+  echo PASS
+fi

+ 699 - 0
tests/presubmit_unittest.py

@@ -0,0 +1,699 @@
+#!/usr/bin/python
+# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for presubmit.py and presubmit_canned_checks.py."""
+
+import os
+import StringIO
+import unittest
+
+# Local imports
+import gcl
+import presubmit
+import presubmit_canned_checks
+
+
+class PresubmitTestsBase(unittest.TestCase):
+  """Sets up and tears down the mocks but doesn't test anything as-is."""
+  def setUp(self):
+    self.original_IsFile = os.path.isfile
+    def MockIsFile(f):
+      dir = os.path.dirname(f)
+      return dir.endswith('haspresubmit') or dir == ''
+    os.path.isfile = MockIsFile
+
+    self.original_GetSVNFileInfo = gcl.GetSVNFileInfo
+    def MockGetSVNFileInfo(path):
+      if path.count('notfound'):
+        return {}
+      results = {
+        'Path': path[len('svn:/foo/'):],
+        'URL': 'svn:/foo/%s' % path.replace('\\', '/'),
+      }
+      if path.endswith('isdir'):
+        results['Node Kind'] = 'directory'
+      else:
+        results['Node Kind'] = 'file'
+      return results
+    gcl.GetSVNFileInfo = MockGetSVNFileInfo
+
+    self.original_GetSVNFileProperty = gcl.GetSVNFileProperty
+    def MockGetSVNFileProperty(path, property_name):
+      if property_name == 'svn:secret-property':
+        return 'secret-property-value'
+      elif path.count('binary'):
+        return 'application/octet-stream'
+      else:
+        if len(path) % 2:
+          return 'text/plain'
+        else:
+          return ''
+    gcl.GetSVNFileProperty = MockGetSVNFileProperty
+
+    self.original_ReadFile = gcl.ReadFile
+    def MockReadFile(path):
+      if path.count('nosuchfile'):
+        return None
+      elif path.endswith('isdir'):
+        self.fail('Should not attempt to read file that is directory.')
+      elif path.endswith('PRESUBMIT.py'):
+        # used in testDoPresubmitChecks
+        return """
+def CheckChangeOnUpload(input_api, output_api):
+  if not input_api.change.NOSUCHKEY:
+    return [output_api.PresubmitError("!!")]
+  elif not input_api.change.REALLYNOSUCHKEY:
+    return [output_api.PresubmitPromptWarning("??")]
+  elif not input_api.change.REALLYABSOLUTELYNOSUCHKEY:
+    return [output_api.PresubmitPromptWarning("??"),
+            output_api.PresubmitError("XX!!XX")]
+  else:
+    return ()
+"""
+      else:
+        return 'one:%s\r\ntwo:%s' % (path, path)
+    gcl.ReadFile = MockReadFile
+
+    self.original_GetRepositoryRoot = gcl.GetRepositoryRoot
+    def MockGetRepositoryRoot():
+      return ''
+    gcl.GetRepositoryRoot = MockGetRepositoryRoot
+
+  def tearDown(self):
+    os.path.isfile = self.original_IsFile
+    gcl.GetSVNFileInfo = self.original_GetSVNFileInfo
+    gcl.GetSVNFileProperty = self.original_GetSVNFileProperty
+    gcl.ReadFile = self.original_ReadFile
+    gcl.GetRepositoryRoot = self.original_GetRepositoryRoot
+
+  @staticmethod
+  def MakeBasicChange(name, description):
+    ci = gcl.ChangeInfo(name=name,
+                        description=description,
+                        files=[])
+    change = presubmit.GclChange(ci)
+    return change
+
+  def compareMembers(self, object, members):
+    """If you add a member, be sure to add the relevant test!"""
+    # Skip over members starting with '_' since they are usually not meant to
+    # be for public use.
+    actual_members = [x for x in sorted(dir(object))
+                      if not x.startswith('_')]
+    self.assertEqual(actual_members, sorted(members))
+
+
+class PresubmitUnittest(PresubmitTestsBase):
+  """General presubmit.py tests (excluding InputApi and OutputApi)."""
+  def testMembersChanged(self):
+    members = [
+      'AffectedFile', 'DoPresubmitChecks', 'GclChange', 'InputApi',
+      'ListRelevantPresubmitFiles', 'Main', 'NotImplementedException',
+      'OutputApi', 'ParseFiles', 'PresubmitExecuter', 'SPECIAL_KEYS',
+      'ScanSubDirs', 'cPickle', 'cStringIO', 'exceptions',
+      'fnmatch', 'gcl', 'glob', 'marshal', 'normpath', 'optparse', 'os',
+      'pickle', 'presubmit_canned_checks', 're', 'subprocess', 'sys',
+      'tempfile', 'types', 'urllib2',
+    ]
+    # If this test fails, you should add the relevant test.
+    self.compareMembers(presubmit, members)
+
+  def testListRelevantPresubmitFiles(self):
+    presubmit_files = presubmit.ListRelevantPresubmitFiles([
+        'blat.cc',
+        'foo/haspresubmit/yodle/smart.h',
+        'moo/mat/gat/yo.h',
+        'foo/luck.h'])
+    self.failUnless(len(presubmit_files) == 2)
+    self.failUnless(presubmit.normpath('PRESUBMIT.py') in presubmit_files)
+    self.failUnless(presubmit.normpath('foo/haspresubmit/PRESUBMIT.py') in
+                    presubmit_files)
+
+  def testTagLineRe(self):
+    m = presubmit._tag_line_re.match(' BUG =1223, 1445  \t')
+    self.failUnless(m)
+    self.failUnlessEqual(m.group('key'), 'BUG')
+    self.failUnlessEqual(m.group('value'), '1223, 1445')
+
+  def testGclChange(self):
+    description_lines = ('Hello there',
+                         'this is a change',
+                         'BUG=123',
+                         ' STORY =http://foo/  \t',
+                         'and some more regular text  \t')
+    files = [
+      ['A', 'foo/blat.cc'],
+      ['M', 'binary.dll'],  # a binary file
+      ['A', 'isdir'],  # a directory
+      ['M', 'flop/notfound.txt'],  # not found in SVN, still exists locally
+      ['D', 'boo/flap.h'],
+    ]
+
+    ci = gcl.ChangeInfo(name='mychange',
+                        description='\n'.join(description_lines),
+                        files=files)
+    change = presubmit.GclChange(ci)
+
+    self.failUnless(change.Change() == 'mychange')
+    self.failUnless(change.Changelist() == 'mychange')
+    self.failUnless(change.DescriptionText() ==
+                    'Hello there\nthis is a change\nand some more regular text')
+    self.failUnless(change.FullDescriptionText() ==
+                    '\n'.join(description_lines))
+
+    self.failUnless(change.BugIDs == '123')
+    self.failUnless(change.BUG == '123')
+    self.failUnless(change.STORY == 'http://foo/')
+
+    self.failUnless(len(change.AffectedFiles()) == 4)
+    self.failUnless(len(change.AffectedFiles(include_dirs=True)) == 5)
+    self.failUnless(len(change.AffectedFiles(include_deletes=False)) == 3)
+    self.failUnless(len(change.AffectedFiles(include_dirs=True,
+                                             include_deletes=False)) == 4)
+
+    affected_text_files = change.AffectedTextFiles(include_deletes=True)
+    self.failUnless(len(affected_text_files) == 3)
+    self.failIf(filter(lambda x: x.LocalPath() == 'binary.dll',
+                       affected_text_files))
+
+    local_paths = change.LocalPaths()
+    expected_paths = [presubmit.normpath(f[1]) for f in files]
+    self.failUnless(
+        len(filter(lambda x: x in expected_paths, local_paths)) == 4)
+
+    server_paths = change.ServerPaths()
+    expected_paths = ['svn:/foo/%s' % f[1] for f in files if
+                      f[1] != 'flop/notfound.txt']
+    expected_paths.append('')  # one unknown file
+    self.failUnless(
+      len(filter(lambda x: x in expected_paths, server_paths)) == 4)
+
+    files = [[x[0], presubmit.normpath(x[1])] for x in files]
+
+    rhs_lines = []
+    for line in change.RightHandSideLines():
+      rhs_lines.append(line)
+    self.failUnless(rhs_lines[0][0].LocalPath() == files[0][1])
+    self.failUnless(rhs_lines[0][1] == 1)
+    self.failUnless(rhs_lines[0][2] == 'one:%s' % files[0][1])
+
+    self.failUnless(rhs_lines[1][0].LocalPath() == files[0][1])
+    self.failUnless(rhs_lines[1][1] == 2)
+    self.failUnless(rhs_lines[1][2] == 'two:%s' % files[0][1])
+
+    self.failUnless(rhs_lines[2][0].LocalPath() == files[3][1])
+    self.failUnless(rhs_lines[2][1] == 1)
+    self.failUnless(rhs_lines[2][2] == 'one:%s' % files[3][1])
+
+    self.failUnless(rhs_lines[3][0].LocalPath() == files[3][1])
+    self.failUnless(rhs_lines[3][1] == 2)
+    self.failUnless(rhs_lines[3][2] == 'two:%s' % files[3][1])
+
+  def testAffectedFile(self):
+    af = presubmit.AffectedFile('foo/blat.cc', 'M')
+    self.failUnless(af.ServerPath() == 'svn:/foo/foo/blat.cc')
+    self.failUnless(af.LocalPath() == presubmit.normpath('foo/blat.cc'))
+    self.failUnless(af.Action() == 'M')
+    self.failUnless(af.NewContents() == ['one:%s' % af.LocalPath(),
+                                         'two:%s' % af.LocalPath()])
+
+    af = presubmit.AffectedFile('notfound.cc', 'A')
+    self.failUnless(af.ServerPath() == '')
+
+  def testExecPresubmitScript(self):
+    description_lines = ('Hello there',
+                         'this is a change',
+                         'STORY=http://tracker/123')
+    files = [
+      ['A', 'foo\\blat.cc'],
+    ]
+    ci = gcl.ChangeInfo(name='mychange',
+                        description='\n'.join(description_lines),
+                        files=files)
+
+    executer = presubmit.PresubmitExecuter(ci, False)
+    self.failIf(executer.ExecPresubmitScript('', 'PRESUBMIT.py'))
+    # No error if no on-upload entry point
+    self.failIf(executer.ExecPresubmitScript(
+      ('def CheckChangeOnCommit(input_api, output_api):\n'
+       '  return (output_api.PresubmitError("!!"))\n'),
+      'PRESUBMIT.py'
+    ))
+
+    executer = presubmit.PresubmitExecuter(ci, True)
+    # No error if no on-commit entry point
+    self.failIf(executer.ExecPresubmitScript(
+      ('def CheckChangeOnUpload(input_api, output_api):\n'
+       '  return (output_api.PresubmitError("!!"))\n'),
+      'PRESUBMIT.py'
+    ))
+
+    self.failIf(executer.ExecPresubmitScript(
+      ('def CheckChangeOnUpload(input_api, output_api):\n'
+       '  if not input_api.change.STORY:\n'
+       '    return (output_api.PresubmitError("!!"))\n'
+       '  else:\n'
+       '    return ()'),
+      'PRESUBMIT.py'
+    ))
+
+    self.failUnless(executer.ExecPresubmitScript(
+      ('def CheckChangeOnCommit(input_api, output_api):\n'
+       '  if not input_api.change.NOSUCHKEY:\n'
+       '    return [output_api.PresubmitError("!!")]\n'
+       '  else:\n'
+       '    return ()'),
+      'PRESUBMIT.py'
+    ))
+
+    try:
+      executer.ExecPresubmitScript(
+        ('def CheckChangeOnCommit(input_api, output_api):\n'
+         '  return "foo"'),
+        'PRESUBMIT.py')
+    except Exception:
+      pass  # expected case
+    else:
+      self.fail()
+
+    try:
+      executer.ExecPresubmitScript(
+        ('def CheckChangeOnCommit(input_api, output_api):\n'
+         '  return ["foo"]'),
+        'PRESUBMIT.py')
+    except Exception:
+      pass  # expected case
+    else:
+      self.fail()
+
+  def testDoPresubmitChecks(self):
+    description_lines = ('Hello there',
+                         'this is a change',
+                         'STORY=http://tracker/123')
+    files = [
+      ['A', 'haspresubmit\\blat.cc'],
+    ]
+    ci = gcl.ChangeInfo(name='mychange',
+                        description='\n'.join(description_lines),
+                        files=files)
+
+    output = StringIO.StringIO()
+    input = StringIO.StringIO('y\n')
+
+    self.failIf(presubmit.DoPresubmitChecks(ci, False, False, output, input,
+                                            None))
+    self.assertEqual(output.getvalue().count('!!'), 2)
+
+  def testDoPresubmitChecksPromptsAfterWarnings(self):
+    description_lines = ('Hello there',
+                         'this is a change',
+                         'NOSUCHKEY=http://tracker/123')
+    files = [
+      ['A', 'haspresubmit\\blat.cc'],
+    ]
+    ci = gcl.ChangeInfo(name='mychange',
+                        description='\n'.join(description_lines),
+                        files=files)
+
+    output = StringIO.StringIO()
+    input = StringIO.StringIO('n\n')  # say no to the warning
+
+    self.failIf(presubmit.DoPresubmitChecks(ci, False, False, output, input,
+                                            None))
+    self.assertEqual(output.getvalue().count('??'), 2)
+
+    output = StringIO.StringIO()
+    input = StringIO.StringIO('y\n')  # say yes to the warning
+
+    self.failUnless(presubmit.DoPresubmitChecks(ci,
+                                                False,
+                                                False,
+                                                output,
+                                                input,
+                                                None))
+    self.failUnless(output.getvalue().count('??'))
+
+  def testDoPresubmitChecksNoWarningPromptIfErrors(self):
+    description_lines = ('Hello there',
+                         'this is a change',
+                         'NOSUCHKEY=http://tracker/123',
+                         'REALLYNOSUCHKEY=http://tracker/123')
+    files = [
+      ['A', 'haspresubmit\\blat.cc'],
+    ]
+    ci = gcl.ChangeInfo(name='mychange',
+                        description='\n'.join(description_lines),
+                        files=files)
+
+    output = StringIO.StringIO()
+    input = StringIO.StringIO()  # should be unused
+
+    self.failIf(presubmit.DoPresubmitChecks(ci, False, False, output, input,
+                                            None))
+    self.assertEqual(output.getvalue().count('??'), 2)
+    self.assertEqual(output.getvalue().count('XX!!XX'), 2)
+    self.assertEqual(output.getvalue().count('(y/N)'), 0)
+
+  def testDoDefaultPresubmitChecks(self):
+    description_lines = ('Hello there',
+                         'this is a change',
+                         'STORY=http://tracker/123')
+    files = [
+      ['A', 'haspresubmit\\blat.cc'],
+    ]
+    ci = gcl.ChangeInfo(name='mychange',
+                        description='\n'.join(description_lines),
+                        files=files)
+
+    output = StringIO.StringIO()
+    input = StringIO.StringIO('y\n')
+    DEFAULT_SCRIPT = """
+def CheckChangeOnUpload(input_api, output_api):
+  return [output_api.PresubmitError("!!")]
+"""
+    def MockReadFile(dummy):
+      return ''
+    gcl.ReadFile = MockReadFile
+    def MockIsFile(dummy):
+      return False
+    os.path.isfile = MockIsFile
+    self.failUnless(presubmit.DoPresubmitChecks(ci, False, False, output, input,
+                                                DEFAULT_SCRIPT))
+    self.failIf(output.getvalue().count('!!') == 1)
+
+  def testDirectoryHandling(self):
+    files = [
+      ['A', 'isdir'],
+      ['A', 'isdir\\blat.cc'],
+    ]
+    ci = gcl.ChangeInfo(name='mychange',
+                        description='foo',
+                        files=files)
+    change = presubmit.GclChange(ci)
+
+    affected_files = change.AffectedFiles(include_dirs=False)
+    self.failUnless(len(affected_files) == 1)
+    self.failUnless(affected_files[0].LocalPath().endswith('blat.cc'))
+
+    affected_files_and_dirs = change.AffectedFiles(include_dirs=True)
+    self.failUnless(len(affected_files_and_dirs) == 2)
+
+  def testSvnProperty(self):
+    affected_file = presubmit.AffectedFile('foo.cc', 'A')
+    self.failUnless(affected_file.SvnProperty('svn:secret-property') ==
+                    'secret-property-value')
+
+
+class InputApiUnittest(PresubmitTestsBase):
+  """Tests presubmit.InputApi."""
+  def testMembersChanged(self):
+    members = [
+      'AbsoluteLocalPaths', 'AffectedFiles', 'AffectedTextFiles',
+      'DepotToLocalPath', 'FilterTextFiles', 'LocalPaths', 'LocalToDepotPath',
+      'PresubmitLocalPath', 'RightHandSideLines', 'ServerPaths',
+      'basename', 'cPickle', 'cStringIO', 'canned_checks', 'change',
+      'current_presubmit_path', 'marshal', 'os_path', 'pickle', 'platform',
+      're', 'subprocess', 'tempfile', 'urllib2',
+    ]
+    # If this test fails, you should add the relevant test.
+    self.compareMembers(presubmit.InputApi(None, None), members)
+
+  def testDepotToLocalPath(self):
+    path = presubmit.InputApi.DepotToLocalPath('svn:/foo/smurf')
+    self.failUnless(path == 'smurf')
+    path = presubmit.InputApi.DepotToLocalPath('svn:/foo/notfound/burp')
+    self.failUnless(path is None)
+
+  def testLocalToDepotPath(self):
+    path = presubmit.InputApi.LocalToDepotPath('smurf')
+    self.failUnless(path == 'svn:/foo/smurf')
+    path = presubmit.InputApi.LocalToDepotPath('notfound-food')
+    self.failUnless(path is None)
+
+  def testInputApiConstruction(self):
+    # Fudge the change object; it's not used during construction anyway.
+    api = presubmit.InputApi(change=42, presubmit_path='foo/path')
+    self.failUnless(api.PresubmitLocalPath() == 'foo/path')
+    self.failUnless(api.change == 42)
+
+  def testFilterTextFiles(self):
+    class MockAffectedFile(object):
+      def __init__(self, path, action):
+        self.path = path
+        self.action = action
+      def Action(self):
+        return self.action
+      def LocalPath(self):
+        return self.path
+      def AbsoluteLocalPath(self):
+        return self.path
+
+    files = [MockAffectedFile('foo/blat.txt', 'M'),
+             MockAffectedFile('foo/binary.blob', 'M'),
+             MockAffectedFile('blat/flop.txt', 'D')]
+
+    output = presubmit.InputApi.FilterTextFiles(files, include_deletes=True)
+    self.failUnless(len(output) == 2)
+    self.failUnless(files[0] in output and files[2] in output)
+
+    output = presubmit.InputApi.FilterTextFiles(files, include_deletes=False)
+    self.failUnless(len(output) == 1)
+    self.failUnless(files[0] in output)
+
+  def testInputApiPresubmitScriptFiltering(self):
+    description_lines = ('Hello there',
+                         'this is a change',
+                         'BUG=123',
+                         ' STORY =http://foo/  \t',
+                         'and some more regular text')
+    files = [
+      ['A', os.path.join('foo', 'blat.cc')],
+      ['M', os.path.join('foo', 'blat', 'binary.dll')],
+      ['D', 'foo/mat/beingdeleted.txt'],
+      ['M', 'flop/notfound.txt'],
+      ['A', 'boo/flap.h'],
+    ]
+
+    ci = gcl.ChangeInfo(name='mychange',
+                        description='\n'.join(description_lines),
+                        files=files)
+    change = presubmit.GclChange(ci)
+
+    api = presubmit.InputApi(change, 'foo/PRESUBMIT.py')
+
+    affected_files = api.AffectedFiles()
+    self.failUnless(len(affected_files) == 3)
+    self.failUnless(affected_files[0].LocalPath() ==
+                    presubmit.normpath('foo/blat.cc'))
+    self.failUnless(affected_files[1].LocalPath() ==
+                    presubmit.normpath('foo/blat/binary.dll'))
+    self.failUnless(affected_files[2].LocalPath() ==
+                    presubmit.normpath('foo/mat/beingdeleted.txt'))
+
+    rhs_lines = []
+    for line in api.RightHandSideLines():
+      rhs_lines.append(line)
+    self.failUnless(len(rhs_lines) == 2)
+    self.failUnless(rhs_lines[0][0].LocalPath() ==
+                    presubmit.normpath('foo/blat.cc'))
+
+  def testGetAbsoluteLocalPath(self):
+    # Regression test: presubmit code that relies on invoking SVN (e.g. to
+    # get a file's mime type) used to fail unless gcl was invoked from the
+    # client root (e.g. if you were at 'src' and did 'cd base' before running
+    # 'gcl upload', svn wouldn't find the files the presubmit script asked
+    # about).
+    files = [
+      ['A', 'isdir'],
+      ['A', os.path.join('isdir', 'blat.cc')]
+    ]
+    ci = gcl.ChangeInfo(name='mychange',
+                        description='',
+                        files=files)
+    # A Windows-style root doesn't make sense on other platforms; this is
+    # somewhat hacky, but needed since we can't just use
+    # os.path.join('c:', 'temp').
+    change = presubmit.GclChange(ci, 'c:' + os.sep + 'temp')
+    affected_files = change.AffectedFiles(include_dirs=True)
+    # Local paths should remain the same
+    self.failUnless(affected_files[0].LocalPath() ==
+                    presubmit.normpath('isdir'))
+    self.failUnless(affected_files[1].LocalPath() ==
+                    presubmit.normpath('isdir/blat.cc'))
+    # Absolute paths should be prefixed
+    self.failUnless(affected_files[0].AbsoluteLocalPath() ==
+                    presubmit.normpath('c:/temp/isdir'))
+    self.failUnless(affected_files[1].AbsoluteLocalPath() ==
+                    presubmit.normpath('c:/temp/isdir/blat.cc'))
+
+    # New helper functions need to work
+    absolute_paths_from_change = change.AbsoluteLocalPaths(include_dirs=True)
+    api = presubmit.InputApi(change=change, presubmit_path='isdir/PRESUBMIT.py')
+    absolute_paths_from_api = api.AbsoluteLocalPaths(include_dirs=True)
+    for absolute_paths in [absolute_paths_from_change,
+                           absolute_paths_from_api]:
+      self.failUnless(absolute_paths[0] == presubmit.normpath('c:/temp/isdir'))
+      self.failUnless(absolute_paths[1] ==
+                      presubmit.normpath('c:/temp/isdir/blat.cc'))
+
+
+class OutputApiUnittest(PresubmitTestsBase):
+  """Tests presubmit.OutputApi."""
+  def testMembersChanged(self):
+    members = [
+      'MailTextResult', 'PresubmitError', 'PresubmitNotifyResult',
+      'PresubmitPromptWarning', 'PresubmitResult',
+    ]
+    # If this test fails, you should add the relevant test.
+    self.compareMembers(presubmit.OutputApi(), members)
+
+  def testOutputApiBasics(self):
+    self.failUnless(presubmit.OutputApi.PresubmitError('').IsFatal())
+    self.failIf(presubmit.OutputApi.PresubmitError('').ShouldPrompt())
+
+    self.failIf(presubmit.OutputApi.PresubmitPromptWarning('').IsFatal())
+    self.failUnless(
+        presubmit.OutputApi.PresubmitPromptWarning('').ShouldPrompt())
+
+    self.failIf(presubmit.OutputApi.PresubmitNotifyResult('').IsFatal())
+    self.failIf(presubmit.OutputApi.PresubmitNotifyResult('').ShouldPrompt())
+
+    # TODO(joi) Test MailTextResult once implemented.
+
+  def testOutputApiHandling(self):
+    output = StringIO.StringIO()
+    unused_input = StringIO.StringIO()
+    error = presubmit.OutputApi.PresubmitError('!!!')
+    self.failIf(error._Handle(output, unused_input))
+    self.failUnless(output.getvalue().count('!!!'))
+
+    output = StringIO.StringIO()
+    notify = presubmit.OutputApi.PresubmitNotifyResult('?see?')
+    self.failUnless(notify._Handle(output, unused_input))
+    self.failUnless(output.getvalue().count('?see?'))
+
+    output = StringIO.StringIO()
+    input = StringIO.StringIO('y')
+    warning = presubmit.OutputApi.PresubmitPromptWarning('???')
+    self.failUnless(warning._Handle(output, input))
+    self.failUnless(output.getvalue().count('???'))
+
+    output = StringIO.StringIO()
+    input = StringIO.StringIO('n')
+    warning = presubmit.OutputApi.PresubmitPromptWarning('???')
+    self.failIf(warning._Handle(output, input))
+    self.failUnless(output.getvalue().count('???'))
+
+    output = StringIO.StringIO()
+    input = StringIO.StringIO('\n')
+    warning = presubmit.OutputApi.PresubmitPromptWarning('???')
+    self.failIf(warning._Handle(output, input))
+    self.failUnless(output.getvalue().count('???'))
+
+
+class CannedChecksUnittest(PresubmitTestsBase):
+  """Tests presubmit_canned_checks.py."""
+  class MockInputApi(object):
+    class MockUrllib2(object):
+      class urlopen(object):
+        def __init__(self, url):
+          if url == 'url_to_open':
+            self.result = '1'
+          else:
+            self.result = '0'
+        def read(self):
+          return self.result
+        def close(self):
+          pass
+    def __init__(self, lines=None):
+      self.lines = lines
+      self.basename = lambda x: x
+      self.urllib2 = self.MockUrllib2()
+      self.re = presubmit.re
+
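+    # Yields one (affected_file, line_number, line_text) tuple per line,
+    # mirroring the shape produced by presubmit.InputApi.RightHandSideLines.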
+    def RightHandSideLines(self):
+      for line in self.lines:
+        yield (presubmit.AffectedFile('bingo', 'M'), 1, line)
+
+  def testMembersChanged(self):
+    members = [
+      'CheckChangeHasNoTabs', 'CheckChangeHasQaField',
+      'CheckChangeHasTestedField', 'CheckDoNotSubmit',
+      'CheckDoNotSubmitInDescription', 'CheckDoNotSubmitInFiles',
+      'CheckLongLines', 'CheckTreeIsOpen',
+    ]
+    # If this test fails, you should add the relevant test.
+    self.compareMembers(presubmit_canned_checks, members)
+
+  def testCannedCheckChangeHasTestedField(self):
+    change = self.MakeBasicChange('foo',
+                                  'Foo\nTESTED=did some stuff')
+    api = presubmit.InputApi(change, 'PRESUBMIT.py')
+    self.failIf(presubmit_canned_checks.CheckChangeHasTestedField(
+        api, presubmit.OutputApi))
+
+    change = self.MakeBasicChange('foo',
+                                  'Foo\nNEVERTESTED=did some stuff')
+    api = presubmit.InputApi(change, 'PRESUBMIT.py')
+    self.failUnless(presubmit_canned_checks.CheckChangeHasTestedField(
+        api, presubmit.OutputApi))
+
+  def testCannedCheckChangeHasQAField(self):
+    change = self.MakeBasicChange('foo',
+                                  'Foo\nQA=test floop feature very well')
+    api = presubmit.InputApi(change, 'PRESUBMIT.py')
+    self.failIf(presubmit_canned_checks.CheckChangeHasQaField(
+        api, presubmit.OutputApi))
+
+    change = self.MakeBasicChange('foo',
+                                  'Foo\nNOTFORQA=test floop feature very well')
+    api = presubmit.InputApi(change, 'PRESUBMIT.py')
+    self.failUnless(presubmit_canned_checks.CheckChangeHasQaField(
+        api, presubmit.OutputApi))
+
+  def testCannedCheckDoNotSubmitInDescription(self):
+    change = self.MakeBasicChange('foo', 'hello')
+    api = presubmit.InputApi(change, 'PRESUBMIT.py')
+    self.failIf(presubmit_canned_checks.CheckDoNotSubmitInDescription(
+        api, presubmit.OutputApi))
+
+    change = self.MakeBasicChange('foo',
+                                  'DO NOT ' + 'SUBMIT')
+    api = presubmit.InputApi(change, 'PRESUBMIT.py')
+    self.failUnless(presubmit_canned_checks.CheckDoNotSubmitInDescription(
+        api, presubmit.OutputApi))
+
+  def testCannedCheckDoNotSubmitInFiles(self):
+    self.failIf(presubmit_canned_checks.CheckDoNotSubmitInFiles(
+      self.MockInputApi(['hello', 'there']), presubmit.OutputApi
+    ))
+    self.failUnless(presubmit_canned_checks.CheckDoNotSubmitInFiles(
+      self.MockInputApi(['hello', 'yo, DO NOT ' + 'SUBMIT']),
+                      presubmit.OutputApi))
+
+  def testCannedCheckChangeHasNoTabs(self):
+    self.failIf(presubmit_canned_checks.CheckChangeHasNoTabs(
+      self.MockInputApi(['hello', 'there']), presubmit.OutputApi
+    ))
+    self.failUnless(presubmit_canned_checks.CheckChangeHasNoTabs(
+      self.MockInputApi(['hello', 'there\tit is']), presubmit.OutputApi
+    ))
+
+  def testCannedCheckLongLines(self):
+    self.failIf(presubmit_canned_checks.CheckLongLines(
+      self.MockInputApi(['hello', 'there']), presubmit.OutputApi, 5
+    ))
+    self.failUnless(presubmit_canned_checks.CheckLongLines(
+      self.MockInputApi(['hello', 'there!']), presubmit.OutputApi, 5
+    ))
+
+  def testCannedCheckTreeIsOpen(self):
+    self.failIf(presubmit_canned_checks.CheckTreeIsOpen(
+      self.MockInputApi(), presubmit.OutputApi, url='url_to_open', closed='0'
+    ))
+    self.failUnless(presubmit_canned_checks.CheckTreeIsOpen(
+      self.MockInputApi(), presubmit.OutputApi, url='url_to_closed', closed='0'
+    ))
+
+
+if __name__ == '__main__':
+  unittest.main()

+ 50 - 0
tests/test-lib.sh

@@ -0,0 +1,50 @@
+#!/bin/bash
+
+# Abort on error.
+set -e
+
+PWD=`pwd`
+REPO_URL=file://$PWD/svnrepo
+GIT_CL=$PWD/../git-cl
+
+# Set up an SVN repo that has a few commits to trunk.
+setup_initsvn() {
+  echo "Setting up test SVN repo..."
+  rm -rf svnrepo
+  svnadmin create svnrepo
+
+  rm -rf svn
+  svn co -q $REPO_URL svn
+  (
+    cd svn
+    echo "test" > test
+    svn add -q test
+    svn commit -q -m "initial commit"
+    echo "test2" >> test
+    svn commit -q -m "second commit"
+  )
+}
+
+# Set up a git-svn checkout of the repo.
+setup_gitsvn() {
+  echo "Setting up test git-svn repo..."
+  rm -rf git-svn
+  # There appears to be no way to make git-svn completely shut up, so we
+  # redirect its output.
+  git svn -q clone $REPO_URL git-svn >/dev/null 2>&1
+}
+
+cleanup() {
+  rm -rf svnrepo svn git-svn
+}
+
+# Usage: test_expect_success "description of test" "test code".
+test_expect_success() {
+  echo "TESTING: $1"
+  exit_code=0
+  sh -c "$2" || exit_code=$?
+  if [ $exit_code != 0 ]; then
+    echo "FAILURE: $1"
+    return $exit_code
+  fi
+}
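+
+# Example (illustrative): the test body is any shell command string, e.g.
+#   test_expect_success "git-cl is executable" "test -x $GIT_CL"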

+ 505 - 0
trychange.py

@@ -0,0 +1,505 @@
+#!/usr/bin/python
+# Copyright (c) 2009 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Client-side script to send a try job to the try server. It communicates to
+the try server by either writting to a svn repository or by directly connecting
+to the server by HTTP.
+"""
+
+
+import datetime
+import getpass
+import logging
+import optparse
+import os
+import shutil
+import sys
+import tempfile
+import traceback
+import urllib
+
+import gcl
+import upload
+
+__version__ = '1.1'
+
+
+# Constants
+HELP_STRING = "Sorry, Tryserver is not available."
+SCRIPT_PATH = os.path.join('tools', 'tryserver', 'tryserver.py')
+USAGE = r"""%prog [options]
+
+Client-side script to send a try job to the try server. It communicates with
+the try server either by writing to an svn repository or by connecting to the
+server directly over HTTP.
+
+
+Examples:
+  Send a git patch from a web site (git inserts a/ and b/) and fix the base dir:
+    %prog --url http://url/to/patch.diff --patchlevel 1 --root src
+
+  Use svn to store the try job, specify an alternate email address and use a
+  premade diff file on the local drive:
+    %prog --email user@example.com
+            --svn_repo svn://svn.chromium.org/chrome-try/try --diff foo.diff
+
+  Run only on a 'mac' slave with revision src@123, clobbering first; manually
+  specify the 3 source files to use for the try job:
+    %prog --bot mac --revision src@123 --clobber -f src/a.cc -f src/a.h
+            -f include/b.h
+
+"""
+
+class InvalidScript(Exception):
+  def __str__(self):
+    return self.args[0] + '\n' + HELP_STRING
+
+
+class NoTryServerAccess(Exception):
+  def __str__(self):
+    return self.args[0] + '\n' + HELP_STRING
+
+
+def PathDifference(root, subpath):
+  """Returns the difference subpath minus root."""
+  if subpath.find(root) != 0:
+    return None
+  # The len(os.sep) skips the path separator that follows root.
+  return subpath[len(root) + len(os.sep):]
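+# For illustration: PathDifference('/a/b', '/a/b/c/d.txt') returns 'c/d.txt',
+# while a subpath that does not start with root returns None.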
+
+
+def GetSourceRoot():
+  """Returns the absolute directory one level up from the repository root."""
+  return os.path.abspath(os.path.join(gcl.GetRepositoryRoot(), '..'))
+
+
+def ExecuteTryServerScript():
+  """Locates the tryserver script, executes it and returns its dictionary.
+
+  The try server script contains the repository-specific try server commands."""
+  script_locals = {}
+  try:
+    # gcl.GetRepositoryRoot() may throw an exception.
+    script_path = os.path.join(gcl.GetRepositoryRoot(), SCRIPT_PATH)
+  except Exception:
+    return script_locals
+  if os.path.exists(script_path):
+    try:
+      exec(gcl.ReadFile(script_path), script_locals)
+    except Exception, e:
+      # TODO(maruel):  Need to specialize the exception trapper.
+      traceback.print_exc()
+      raise InvalidScript('%s is invalid.' % script_path)
+  return script_locals
+
+
+def EscapeDot(name):
+  return name.replace('.', '-')
+
+
+def RunCommand(command):
+  output, retcode = gcl.RunShellWithReturnCode(command)
+  if retcode:
+    raise NoTryServerAccess(' '.join(command) + '\nOutput:\n' + output)
+  return output
+
+
+class SCM(object):
+  """Simplistic base class to implement one function: ProcessOptions."""
+  def __init__(self, options):
+    self.options = options
+
+  def ProcessOptions(self):
+    raise NotImplementedError()
+
+
+class SVN(SCM):
+  """Gathers the options and diff for a subversion checkout."""
+  def GenerateDiff(self, files, root):
+    """Returns a string containing the diff for the given file list.
+
+    The files in the list should either be absolute paths or relative to the
+    given root. If no root directory is provided, the repository root will be
+    used.
+    """
+    previous_cwd = os.getcwd()
+    if root is None:
+      os.chdir(gcl.GetRepositoryRoot())
+    else:
+      os.chdir(root)
+
+    diff = []
+    for file in files:
+      # Use svn info output instead of os.path.isdir because the latter fails
+      # when the file is deleted.
+      if gcl.GetSVNFileInfo(file).get("Node Kind") == "directory":
+        continue
+      # If the user specified a custom diff command in their svn config file,
+      # then it'll be used when we do svn diff, which we don't want to happen
+      # since we want the unified diff.  Using --diff-cmd=diff doesn't always
+      # work, since they can have another diff executable in their path that
+      # gives different line endings.  So we use a bogus temp directory as the
+      # config directory, which gets around these problems.
+      if sys.platform.startswith("win"):
+        parent_dir = tempfile.gettempdir()
+      else:
+        parent_dir = sys.path[0]  # tempdir is not secure.
+      bogus_dir = os.path.join(parent_dir, "temp_svn_config")
+      if not os.path.exists(bogus_dir):
+        os.mkdir(bogus_dir)
+      # Grabs the diff data.
+      data = gcl.RunShell(["svn", "diff", "--config-dir", bogus_dir, file])
+
+      # We know the diff will be incorrectly formatted. Fix it.
+      if gcl.IsSVNMoved(file):
+        # The file is "new" in the patch sense. Generate a homebrew diff.
+        # We can't use ReadFile() since it's not using binary mode.
+        file_handle = open(file, 'rb')
+        file_content = file_handle.read()
+        file_handle.close()
+        # Prepend '+ ' to every line.
+        file_content = ['+ ' + i for i in file_content.splitlines(True)]
+        nb_lines = len(file_content)
+        # We need to use / since patch on unix will fail otherwise.
+        file = file.replace('\\', '/')
+        data = "Index: %s\n" % file
+        data += ("============================================================="
+                 "======\n")
+        # Note: Should we use /dev/null instead?
+        data += "--- %s\n" % file
+        data += "+++ %s\n" % file
+        data += "@@ -0,0 +1,%d @@\n" % nb_lines
+        data += ''.join(file_content)
+      diff.append(data)
+    os.chdir(previous_cwd)
+    return "".join(diff)
+
+  def ProcessOptions(self):
+    if not self.options.diff:
+      # Generate the diff with svn and write it to the submit queue path.  The
+      # files are relative to the repository root, but we need patches relative
+      # to one level up from there (i.e., 'src'), so adjust both the file
+      # paths and the root of the diff.
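+      # (For illustration: with the repository root at /work/chrome/src, the
+      # diff is rooted at /work/chrome and 'foo.cc' is sent as 'src/foo.cc'.)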
+      source_root = GetSourceRoot()
+      prefix = PathDifference(source_root, gcl.GetRepositoryRoot())
+      adjusted_paths = [os.path.join(prefix, x) for x in self.options.files]
+      self.options.diff = self.GenerateDiff(adjusted_paths, root=source_root)
+
+
+class GIT(SCM):
+  """Gathers the options and diff for a git checkout."""
+  def GenerateDiff(self):
+    """Get the diff we'll send to the try server. We ignore the files list."""
+    branch = upload.RunShell(['git', 'cl', 'upstream']).strip()
+    diff = upload.RunShell(['git', 'diff-tree', '-p', '--no-prefix',
+                            branch, 'HEAD']).splitlines(True)
+    for i in range(len(diff)):
+      # In the case of added files, replace /dev/null with the path to the
+      # file being added.
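+      # For example, '--- /dev/null' followed by '+++ new/file.cc' becomes
+      # '--- new/file.cc', copied from the '+++' line that follows it.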
+      if diff[i].startswith('--- /dev/null'):
+        diff[i] = '--- %s' % diff[i+1][4:]
+    return ''.join(diff)
+
+  def GetEmail(self):
+    # TODO: check for errors here?
+    return upload.RunShell(['git', 'config', 'user.email']).strip()
+
+  def GetPatchName(self):
+    """Construct a name for this patch."""
+    # TODO: perhaps include the hash of the current commit, to distinguish
+    # patches?
+    branch = upload.RunShell(['git', 'symbolic-ref', 'HEAD']).strip()
+    if not branch.startswith('refs/heads/'):
+      raise "Couldn't figure out branch name"
+    branch = branch[len('refs/heads/'):]
+    return branch
+
+  def ProcessOptions(self):
+    if not self.options.diff:
+      self.options.diff = self.GenerateDiff()
+    if not self.options.name:
+      self.options.name = self.GetPatchName()
+    if not self.options.email:
+      self.options.email = self.GetEmail()
+
+
+def _ParseSendChangeOptions(options):
+  """Parse common options passed to _SendChangeHTTP and _SendChangeSVN."""
+  values = {}
+  if options.email:
+    values['email'] = options.email
+  values['user'] = options.user
+  values['name'] = options.name
+  if options.bot:
+    values['bot'] = ','.join(options.bot)
+  if options.revision:
+    values['revision'] = options.revision
+  if options.clobber:
+    values['clobber'] = 'true'
+  if options.tests:
+    values['tests'] = ','.join(options.tests)
+  if options.root:
+    values['root'] = options.root
+  if options.patchlevel:
+    values['patchlevel'] = options.patchlevel
+  if options.issue:
+    values['issue'] = options.issue
+  if options.patchset:
+    values['patchset'] = options.patchset
+  return values
+
+
+def _SendChangeHTTP(options):
+  """Send a change to the try server using the HTTP protocol."""
+  script_locals = ExecuteTryServerScript()
+
+  if not options.host:
+    options.host = script_locals.get('try_server_http_host', None)
+    if not options.host:
+      raise NoTryServerAccess('Please use the --host option to specify the try '
+          'server host to connect to.')
+  if not options.port:
+    options.port = script_locals.get('try_server_http_port', None)
+    if not options.port:
+      raise NoTryServerAccess('Please use the --port option to specify the try '
+          'server port to connect to.')
+
+  values = _ParseSendChangeOptions(options)
+  values['patch'] = options.diff
+
+  url = 'http://%s:%s/send_try_patch' % (options.host, options.port)
+  proxies = None
+  if options.proxy:
+    if options.proxy.lower() == 'none':
+      # Effectively disable HTTP_PROXY or Internet settings proxy setup.
+      proxies = {}
+    else:
+      proxies = {'http': options.proxy, 'https': options.proxy}
+  try:
+    connection = urllib.urlopen(url, urllib.urlencode(values), proxies=proxies)
+  except IOError, e:
+    # TODO(thestig) this probably isn't quite right.
+    if values.get('bot') and e[2] == 'got a bad status line':
+      raise NoTryServerAccess('%s is inaccessible. Bad --bot argument?' % url)
+    else:
+      raise NoTryServerAccess('%s is inaccessible.' % url)
+  if not connection:
+    raise NoTryServerAccess('%s is inaccessible.' % url)
+  if connection.read() != 'OK':
+    raise NoTryServerAccess('%s is inaccessible.' % url)
+  return options.name
+
+
+def _SendChangeSVN(options):
+  """Send a change to the try server by committing a diff file on a subversion
+  server."""
+  script_locals = ExecuteTryServerScript()
+  if not options.svn_repo:
+    options.svn_repo = script_locals.get('try_server_svn', None)
+    if not options.svn_repo:
+      raise NoTryServerAccess('Please use the --svn_repo option to specify the'
+                              ' try server svn repository to connect to.')
+
+  values = _ParseSendChangeOptions(options)
+  description = ''
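+  # Serialize the options as newline-separated 'key=value' pairs; this text
+  # becomes the svn commit message describing the try job.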
+  for (k, v) in values.iteritems():
+    description += "%s=%s\n" % (k, v)
+
+  # Do an empty checkout.
+  temp_dir = tempfile.mkdtemp()
+  temp_file = tempfile.NamedTemporaryFile()
+  temp_file_name = temp_file.name
+  try:
+    RunCommand(['svn', 'checkout', '--depth', 'empty', '--non-interactive',
+                options.svn_repo, temp_dir])
+    # TODO(maruel): Use a subdirectory per user?
+    current_time = str(datetime.datetime.now()).replace(':', '.')
+    file_name = (EscapeDot(options.user) + '.' + EscapeDot(options.name) +
+                 '.%s.diff' % current_time)
+    full_path = os.path.join(temp_dir, file_name)
+    full_url = options.svn_repo + '/' + file_name
+    file_found = False
+    try:
+      RunCommand(['svn', 'ls', '--non-interactive', full_url])
+      file_found = True
+    except NoTryServerAccess:
+      pass
+    if file_found:
+      # The file already exists in the repo. Note that committing a file is a
+      # no-op if the file's content (the diff) is not modified. This is why the
+      # file name contains the date and time.
+      RunCommand(['svn', 'update', '--non-interactive', full_path])
+      file = open(full_path, 'wb')
+      file.write(options.diff)
+      file.close()
+    else:
+      # Add the file to the repo
+      file = open(full_path, 'wb')
+      file.write(options.diff)
+      file.close()
+      RunCommand(["svn", "add", '--non-interactive', full_path])
+    temp_file.write(description)
+    temp_file.flush()
+    RunCommand(["svn", "commit", '--non-interactive', full_path, '--file',
+                temp_file_name])
+  finally:
+    temp_file.close()
+    shutil.rmtree(temp_dir, True)
+  return options.name
+
+
+def GuessVCS(options):
+  """Helper to guess the version control system.
+
+  NOTE: Very similar to upload.GuessVCS. Doesn't look for hg since we don't
+  support it yet.
+
+  This examines the current directory, guesses which SCM we're using, and
+  returns an instance of the appropriate class.  Exit with an error if we can't
+  figure it out.
+
+  Returns:
+    A SCM instance. Exits if the SCM can't be guessed.
+  """
+  # Subversion has a .svn in all working directories.
+  if os.path.isdir('.svn'):
+    logging.info("Guessed VCS = Subversion")
+    return SVN(options)
+
+  # Git has a command to test if you're in a git tree.
+  # Try running it, but don't die if we don't have git installed.
+  try:
+    out, returncode = gcl.RunShellWithReturnCode(["git", "rev-parse",
+                                                  "--is-inside-work-tree"])
+    if returncode == 0:
+      logging.info("Guessed VCS = Git")
+      return GIT(options)
+  except OSError, (errno, message):
+    if errno != 2:  # ENOENT -- they don't have git installed.
+      raise
+
+  raise NoTryServerAccess("Could not guess version control system. "
+                          "Are you in a working copy directory?")
+
+
+def TryChange(argv,
+              file_list,
+              swallow_exception,
+              prog=None):
+  # Parse argv
+  parser = optparse.OptionParser(usage=USAGE,
+                                 version=__version__,
+                                 prog=prog)
+
+  group = optparse.OptionGroup(parser, "Result and status")
+  group.add_option("-u", "--user", default=getpass.getuser(),
+                   help="Owner user name [default: %default]")
+  group.add_option("-e", "--email", default=os.environ.get('EMAIL_ADDRESS'),
+                   help="Email address where to send the results. Use the "
+                        "EMAIL_ADDRESS environment variable to set the default "
+                        "email address [default: %default]")
+  group.add_option("-n", "--name", default='Unnamed',
+                   help="Descriptive name of the try job")
+  group.add_option("--issue", type='int',
+                   help="Update rietveld issue try job status")
+  group.add_option("--patchset", type='int',
+                   help="Update rietveld issue try job status")
+  parser.add_option_group(group)
+
+  group = optparse.OptionGroup(parser, "Try job options")
+  group.add_option("-b", "--bot", action="append",
+                    help="Only use specifics build slaves, ex: '--bot win' to "
+                         "run the try job only on the 'win' slave; see the try "
+                         "server watefall for the slave's name")
+  group.add_option("-r", "--revision",
+                    help="Revision to use for the try job; default: the "
+                         "revision will be determined by the try server; see "
+                         "its waterfall for more info")
+  group.add_option("-c", "--clobber", action="store_true",
+                    help="Force a clobber before building; e.g. don't do an "
+                         "incremental build")
+  # Override the list of tests to run, use multiple times to list many tests
+  # (or comma separated)
+  group.add_option("-t", "--tests", action="append",
+                   help=optparse.SUPPRESS_HELP)
+  parser.add_option_group(group)
+
+  group = optparse.OptionGroup(parser, "Patch to run")
+  group.add_option("-f", "--file", default=file_list, dest="files",
+                   metavar="FILE", action="append",
+                   help="Use many times to list the files to include in the "
+                        "try, relative to the repository root")
+  group.add_option("--diff",
+                   help="File containing the diff to try")
+  group.add_option("--url",
+                   help="Url where to grab a patch")
+  group.add_option("--root",
+                   help="Root to use for the patch; base subdirectory for "
+                        "patch created in a subdirectory")
+  group.add_option("--patchlevel", type='int', metavar="LEVEL",
+                   help="Used as -pN parameter to patch")
+  parser.add_option_group(group)
+
+  group = optparse.OptionGroup(parser, "Access the try server by HTTP")
+  group.add_option("--use_http", action="store_const", const=_SendChangeHTTP,
+                   dest="send_patch", default=_SendChangeHTTP,
+                   help="Use HTTP to talk to the try server [default]")
+  group.add_option("--host",
+                   help="Host address")
+  group.add_option("--port",
+                   help="HTTP port")
+  group.add_option("--proxy",
+                   help="HTTP proxy")
+  parser.add_option_group(group)
+
+  group = optparse.OptionGroup(parser, "Access the try server with SVN")
+  group.add_option("--use_svn", action="store_const", const=_SendChangeSVN,
+                   dest="send_patch",
+                   help="Use SVN to talk to the try server")
+  group.add_option("--svn_repo", metavar="SVN_URL",
+                   help="SVN url to use to write the changes in; --use_svn is "
+                        "implied when using --svn_repo")
+  parser.add_option_group(group)
+
+  options, args = parser.parse_args(argv)
+  # Switch the default accordingly.
+  if options.svn_repo:
+    options.send_patch = _SendChangeSVN
+
+  if len(args) == 1 and args[0] == 'help':
+    parser.print_help()
+  if (not options.files and not (options.issue and options.patchset) and
+      not options.diff and not options.url):
+    # TODO(maruel): It should just try the modified files showing up in a
+    # svn status.
+    print "Nothing to try, changelist is empty."
+    return
+
+  try:
+    # Convert options.diff into the content of the diff.
+    if options.url:
+      options.diff = urllib.urlopen(options.url).read()
+    elif options.diff:
+      options.diff = gcl.ReadFile(options.diff)
+    # Process the VCS in any case at least to retrieve the email address.
+    try:
+      options.scm = GuessVCS(options)
+      options.scm.ProcessOptions()
+    except NoTryServerAccess, e:
+      # If we got the diff, we don't care.
+      if not options.diff:
+        raise
+
+    # Send the patch.
+    patch_name = options.send_patch(options)
+    print 'Patch \'%s\' sent to try server.' % patch_name
+    if patch_name == 'Unnamed':
+      print "Note: use --name NAME to change the try's name."
+  except (InvalidScript, NoTryServerAccess), e:
+    if swallow_exception:
+      return
+    print e
+
+
+if __name__ == "__main__":
+  TryChange(None, None, False)

+ 1373 - 0
upload.py

@@ -0,0 +1,1373 @@
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tool for uploading diffs from a version control system to the codereview app.
+
+Usage summary: upload.py [options] [-- diff_options]
+
+Diff options are passed to the diff command of the underlying system.
+
+Supported version control systems:
+  Git
+  Mercurial
+  Subversion
+
+It is important for Git/Mercurial users to specify a tree/node/branch to diff
+against by using the '--rev' option.
+"""
+# This code is derived from appcfg.py in the App Engine SDK (open source),
+# and from ASPN recipe #146306.
+
+import cookielib
+import getpass
+import logging
+import md5
+import mimetypes
+import optparse
+import os
+import re
+import socket
+import subprocess
+import sys
+import urllib
+import urllib2
+import urlparse
+
+try:
+  import readline
+except ImportError:
+  pass
+
+# The logging verbosity:
+#  0: Errors only.
+#  1: Status messages.
+#  2: Info logs.
+#  3: Debug logs.
+verbosity = 1
+
+# Max size of patch or base file.
+MAX_UPLOAD_SIZE = 900 * 1024
+
+
+def GetEmail():
+  """Prompts the user for their email address and returns it.
+
+  The last used email address is saved to a file and offered up as a suggestion
+  to the user. If the user presses enter without typing in anything the last
+  used email address is used. If the user enters a new address, it is saved
+  for next time we prompt.
+  """
+  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
+  last_email = ""
+  prompt = "Email: "
+  if os.path.exists(last_email_file_name):
+    try:
+      last_email_file = open(last_email_file_name, "r")
+      last_email = last_email_file.readline().strip("\n")
+      last_email_file.close()
+      prompt = "Email [%s]: " % last_email
+    except IOError, e:
+      pass
+  email = raw_input(prompt).strip()
+  if email:
+    try:
+      last_email_file = open(last_email_file_name, "w")
+      last_email_file.write(email)
+      last_email_file.close()
+    except IOError, e:
+      pass
+  else:
+    email = last_email
+  return email
+
+
+def StatusUpdate(msg):
+  """Print a status message to stdout.
+
+  If 'verbosity' is greater than 0, print the message.
+
+  Args:
+    msg: The string to print.
+  """
+  if verbosity > 0:
+    print msg
+
+
+def ErrorExit(msg):
+  """Print an error message to stderr and exit."""
+  print >>sys.stderr, msg
+  sys.exit(1)
+
+
+class ClientLoginError(urllib2.HTTPError):
+  """Raised to indicate there was an error authenticating with ClientLogin."""
+
+  def __init__(self, url, code, msg, headers, args):
+    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
+    self.args = args
+    self.reason = args["Error"]
+
+
+class AbstractRpcServer(object):
+  """Provides a common interface for a simple RPC server."""
+
+  def __init__(self, host, auth_function, host_override=None, extra_headers={},
+               save_cookies=False):
+    """Creates a new HttpRpcServer.
+
+    Args:
+      host: The host to send requests to.
+      auth_function: A function that takes no arguments and returns an
+        (email, password) tuple when called. Will be called if authentication
+        is required.
+      host_override: The host header to send to the server (defaults to host).
+      extra_headers: A dict of extra headers to append to every request.
+      save_cookies: If True, save the authentication cookies to local disk.
+        If False, use an in-memory cookiejar instead.  Subclasses must
+        implement this functionality.  Defaults to False.
+    """
+    self.host = host
+    self.host_override = host_override
+    self.auth_function = auth_function
+    self.authenticated = False
+    self.extra_headers = extra_headers
+    self.save_cookies = save_cookies
+    self.opener = self._GetOpener()
+    if self.host_override:
+      logging.info("Server: %s; Host: %s", self.host, self.host_override)
+    else:
+      logging.info("Server: %s", self.host)
+
+  def _GetOpener(self):
+    """Returns an OpenerDirector for making HTTP requests.
+
+    Returns:
+      A urllib2.OpenerDirector object.
+    """
+    raise NotImplementedError()
+
+  def _CreateRequest(self, url, data=None):
+    """Creates a new urllib request."""
+    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
+    req = urllib2.Request(url, data=data)
+    if self.host_override:
+      req.add_header("Host", self.host_override)
+    for key, value in self.extra_headers.iteritems():
+      req.add_header(key, value)
+    return req
+
+  def _GetAuthToken(self, email, password):
+    """Uses ClientLogin to authenticate the user, returning an auth token.
+
+    Args:
+      email:    The user's email address
+      password: The user's password
+
+    Raises:
+      ClientLoginError: If there was an error authenticating with ClientLogin.
+      HTTPError: If there was some other form of HTTP error.
+
+    Returns:
+      The authentication token returned by ClientLogin.
+    """
+    account_type = "GOOGLE"
+    if self.host.endswith(".google.com"):
+      # Needed for use inside Google.
+      account_type = "HOSTED"
+    req = self._CreateRequest(
+        url="https://www.google.com/accounts/ClientLogin",
+        data=urllib.urlencode({
+            "Email": email,
+            "Passwd": password,
+            "service": "ah",
+            "source": "rietveld-codereview-upload",
+            "accountType": account_type,
+        }),
+    )
+    try:
+      response = self.opener.open(req)
+      response_body = response.read()
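+      # The body is newline-separated 'key=value' pairs; 'Auth' carries the
+      # token we need (other keys, such as 'SID' and 'LSID', are ignored).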
+      response_dict = dict(x.split("=")
+                           for x in response_body.split("\n") if x)
+      return response_dict["Auth"]
+    except urllib2.HTTPError, e:
+      if e.code == 403:
+        body = e.read()
+        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
+        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
+                               e.headers, response_dict)
+      else:
+        raise
+
+  def _GetAuthCookie(self, auth_token):
+    """Fetches authentication cookies for an authentication token.
+
+    Args:
+      auth_token: The authentication token returned by ClientLogin.
+
+    Raises:
+      HTTPError: If there was an error fetching the authentication cookies.
+    """
+    # This is a dummy value to allow us to identify when we're successful.
+    continue_location = "http://localhost/"
+    args = {"continue": continue_location, "auth": auth_token}
+    req = self._CreateRequest("http://%s/_ah/login?%s" %
+                              (self.host, urllib.urlencode(args)))
+    try:
+      response = self.opener.open(req)
+    except urllib2.HTTPError, e:
+      response = e
+    if (response.code != 302 or
+        response.info()["location"] != continue_location):
+      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
+                              response.headers, response.fp)
+    self.authenticated = True
+
+  def _Authenticate(self):
+    """Authenticates the user.
+
+    The authentication process works as follows:
+     1) We get a username and password from the user
+     2) We use ClientLogin to obtain an AUTH token for the user
+        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
+     3) We pass the auth token to /_ah/login on the server to obtain an
+        authentication cookie. If login was successful, it tries to redirect
+        us to the URL we provided.
+
+    If we attempt to access the upload API without first obtaining an
+    authentication cookie, it returns a 401 response and directs us to
+    authenticate ourselves with ClientLogin.
+    """
+    for i in range(3):
+      credentials = self.auth_function()
+      try:
+        auth_token = self._GetAuthToken(credentials[0], credentials[1])
+      except ClientLoginError, e:
+        if e.reason == "BadAuthentication":
+          print >>sys.stderr, "Invalid username or password."
+          continue
+        if e.reason == "CaptchaRequired":
+          print >>sys.stderr, (
+              "Please go to\n"
+              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
+              "and verify you are a human.  Then try again.")
+          break
+        if e.reason == "NotVerified":
+          print >>sys.stderr, "Account not verified."
+          break
+        if e.reason == "TermsNotAgreed":
+          print >>sys.stderr, "User has not agreed to TOS."
+          break
+        if e.reason == "AccountDeleted":
+          print >>sys.stderr, "The user account has been deleted."
+          break
+        if e.reason == "AccountDisabled":
+          print >>sys.stderr, "The user account has been disabled."
+          break
+        if e.reason == "ServiceDisabled":
+          print >>sys.stderr, ("The user's access to the service has been "
+                               "disabled.")
+          break
+        if e.reason == "ServiceUnavailable":
+          print >>sys.stderr, "The service is not available; try again later."
+          break
+        raise
+      self._GetAuthCookie(auth_token)
+      return
+
+  def Send(self, request_path, payload=None,
+           content_type="application/octet-stream",
+           timeout=None,
+           **kwargs):
+    """Sends an RPC and returns the response.
+
+    Args:
+      request_path: The path to send the request to, e.g. /api/appversion/create.
+      payload: The body of the request, or None to send an empty request.
+      content_type: The Content-Type header to use.
+      timeout: timeout in seconds; default None i.e. no timeout.
+        (Note: for large requests on OS X, the timeout doesn't work right.)
+      kwargs: Any keyword arguments are converted into query string parameters.
+
+    Returns:
+      The response body, as a string.
+    """
+    # TODO: Don't require authentication.  Let the server say
+    # whether it is necessary.
+    if not self.authenticated:
+      self._Authenticate()
+
+    old_timeout = socket.getdefaulttimeout()
+    socket.setdefaulttimeout(timeout)
+    try:
+      tries = 0
+      while True:
+        tries += 1
+        args = dict(kwargs)
+        url = "http://%s%s" % (self.host, request_path)
+        if args:
+          url += "?" + urllib.urlencode(args)
+        req = self._CreateRequest(url=url, data=payload)
+        req.add_header("Content-Type", content_type)
+        try:
+          f = self.opener.open(req)
+          response = f.read()
+          f.close()
+          return response
+        except urllib2.HTTPError, e:
+          if tries > 3:
+            raise
+          elif e.code == 401:
+            self._Authenticate()
+##           elif e.code >= 500 and e.code < 600:
+##             # Server Error - try again.
+##             continue
+          else:
+            raise
+    finally:
+      socket.setdefaulttimeout(old_timeout)
+
+
+class HttpRpcServer(AbstractRpcServer):
+  """Provides a simplified RPC-style interface for HTTP requests."""
+
+  def _Authenticate(self):
+    """Save the cookie jar after authentication."""
+    super(HttpRpcServer, self)._Authenticate()
+    if self.save_cookies:
+      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
+      self.cookie_jar.save()
+
+  def _GetOpener(self):
+    """Returns an OpenerDirector that supports cookies and ignores redirects.
+
+    Returns:
+      A urllib2.OpenerDirector object.
+    """
+    opener = urllib2.OpenerDirector()
+    opener.add_handler(urllib2.ProxyHandler())
+    opener.add_handler(urllib2.UnknownHandler())
+    opener.add_handler(urllib2.HTTPHandler())
+    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
+    opener.add_handler(urllib2.HTTPSHandler())
+    opener.add_handler(urllib2.HTTPErrorProcessor())
+    if self.save_cookies:
+      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
+      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
+      if os.path.exists(self.cookie_file):
+        try:
+          self.cookie_jar.load()
+          self.authenticated = True
+          StatusUpdate("Loaded authentication cookies from %s" %
+                       self.cookie_file)
+        except (cookielib.LoadError, IOError):
+          # Failed to load cookies - just ignore them.
+          pass
+      else:
+        # Create an empty cookie file with mode 600
+        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
+        os.close(fd)
+      # Always chmod the cookie file
+      os.chmod(self.cookie_file, 0600)
+    else:
+      # Don't save cookies across runs of upload.py.
+      self.cookie_jar = cookielib.CookieJar()
+    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
+    return opener
+
+
+parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
+parser.add_option("-y", "--assume_yes", action="store_true",
+                  dest="assume_yes", default=False,
+                  help="Assume that the answer to yes/no questions is 'yes'.")
+# Logging
+group = parser.add_option_group("Logging options")
+group.add_option("-q", "--quiet", action="store_const", const=0,
+                 dest="verbose", help="Print errors only.")
+group.add_option("-v", "--verbose", action="store_const", const=2,
+                 dest="verbose", default=1,
+                 help="Print info level logs (default).")
+group.add_option("--noisy", action="store_const", const=3,
+                 dest="verbose", help="Print all logs.")
+# Review server
+group = parser.add_option_group("Review server options")
+group.add_option("-s", "--server", action="store", dest="server",
+                 default="codereview.appspot.com",
+                 metavar="SERVER",
+                 help=("The server to upload to. The format is host[:port]. "
+                       "Defaults to 'codereview.appspot.com'."))
+group.add_option("-e", "--email", action="store", dest="email",
+                 metavar="EMAIL", default=None,
+                 help="The username to use. Will prompt if omitted.")
+group.add_option("-H", "--host", action="store", dest="host",
+                 metavar="HOST", default=None,
+                 help="Overrides the Host header sent with all RPCs.")
+group.add_option("--no_cookies", action="store_false",
+                 dest="save_cookies", default=True,
+                 help="Do not save authentication cookies to local disk.")
+# Issue
+group = parser.add_option_group("Issue options")
+group.add_option("-d", "--description", action="store", dest="description",
+                 metavar="DESCRIPTION", default=None,
+                 help="Optional description when creating an issue.")
+group.add_option("-f", "--description_file", action="store",
+                 dest="description_file", metavar="DESCRIPTION_FILE",
+                 default=None,
+                 help="Optional path of a file that contains "
+                      "the description when creating an issue.")
+group.add_option("-r", "--reviewers", action="store", dest="reviewers",
+                 metavar="REVIEWERS", default=None,
+                 help="Add reviewers (comma separated email addresses).")
+group.add_option("--cc", action="store", dest="cc",
+                 metavar="CC", default=None,
+                 help="Add CC (comma separated email addresses).")
+# Upload options
+group = parser.add_option_group("Patch options")
+group.add_option("-m", "--message", action="store", dest="message",
+                 metavar="MESSAGE", default=None,
+                 help="A message to identify the patch. "
+                      "Will prompt if omitted.")
+group.add_option("-i", "--issue", type="int", action="store",
+                 metavar="ISSUE", default=None,
+                 help="Issue number to which to add. Defaults to new issue.")
+group.add_option("--download_base", action="store_true",
+                 dest="download_base", default=False,
+                 help="Base files will be downloaded by the server "
+                 "(side-by-side diffs may not work on files with CRs).")
+group.add_option("--rev", action="store", dest="revision",
+                 metavar="REV", default=None,
+                 help="Branch/tree/revision to diff against (used by DVCS).")
+group.add_option("--send_mail", action="store_true",
+                 dest="send_mail", default=False,
+                 help="Send notification email to reviewers.")
+
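+# Illustrative invocations (values are made up; flags as defined above):
+#
+#   upload.py -r reviewer@example.com -m "Fix null deref in foo"
+#   upload.py -i 1234 --send_mail
+#   upload.py -- -x --ignore-eol-style   # args after "--" reach the diff tool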
+
+def GetRpcServer(options):
+  """Returns an instance of an AbstractRpcServer.
+
+  Returns:
+    A new AbstractRpcServer, on which RPC calls can be made.
+  """
+
+  rpc_server_class = HttpRpcServer
+
+  def GetUserCredentials():
+    """Prompts the user for a username and password."""
+    email = options.email
+    if email is None:
+      email = GetEmail()
+    password = getpass.getpass("Password for %s: " % email)
+    return (email, password)
+
+  # If this is the dev_appserver, use fake authentication.
+  host = (options.host or options.server).lower()
+  if host == "localhost" or host.startswith("localhost:"):
+    email = options.email
+    if email is None:
+      email = "test@example.com"
+      logging.info("Using debug user %s.  Override with --email" % email)
+    server = rpc_server_class(
+        options.server,
+        lambda: (email, "password"),
+        host_override=options.host,
+        extra_headers={"Cookie":
+                       'dev_appserver_login="%s:False"' % email},
+        save_cookies=options.save_cookies)
+    # Don't try to talk to ClientLogin.
+    server.authenticated = True
+    return server
+
+  return rpc_server_class(options.server, GetUserCredentials,
+                          host_override=options.host,
+                          save_cookies=options.save_cookies)
+
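+# Illustrative: pointing --server at a local dev_appserver, e.g.
+#   upload.py -s localhost:8080
+# takes the localhost branch above, which fakes authentication with a
+# dev_appserver_login cookie instead of a ClientLogin round trip.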
+
+def EncodeMultipartFormData(fields, files):
+  """Encode form fields for multipart/form-data.
+
+  Args:
+    fields: A sequence of (name, value) elements for regular form fields.
+    files: A sequence of (name, filename, value) elements for data to be
+           uploaded as files.
+  Returns:
+    (content_type, body) ready for httplib.HTTP instance.
+
+  Source:
+    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
+  """
+  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
+  CRLF = '\r\n'
+  lines = []
+  for (key, value) in fields:
+    lines.append('--' + BOUNDARY)
+    lines.append('Content-Disposition: form-data; name="%s"' % key)
+    lines.append('')
+    lines.append(value)
+  for (key, filename, value) in files:
+    lines.append('--' + BOUNDARY)
+    lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
+             (key, filename))
+    lines.append('Content-Type: %s' % GetContentType(filename))
+    lines.append('')
+    lines.append(value)
+  lines.append('--' + BOUNDARY + '--')
+  lines.append('')
+  body = CRLF.join(lines)
+  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
+  return content_type, body
+
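+# Shape of the result, as a sketch:
+#
+#   ctype, body = EncodeMultipartFormData([("subject", "Fix foo")],
+#                                         [("data", "a.diff", "...")])
+#   # ctype == 'multipart/form-data; boundary=-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
+#   # body is a CRLF-joined sequence of parts ending with '--<BOUNDARY>--'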
+
+def GetContentType(filename):
+  """Helper to guess the content-type from the filename."""
+  return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
+
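+# For example, GetContentType("shot.png") returns "image/png", while an
+# unrecognized extension falls back to "application/octet-stream".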
+
+# Use a shell for subcommands on Windows to get a PATH search.
+use_shell = sys.platform.startswith("win")
+
+def RunShellWithReturnCode(command, print_output=False,
+                           universal_newlines=True):
+  """Executes a command and returns the output from stdout and the return code.
+
+  Args:
+    command: Command to execute.
+    print_output: If True, the output is echoed to stdout as it is read.
+                  If False, the output is captured and returned but not
+                  printed.
+    universal_newlines: Use universal_newlines flag (default: True).
+
+  Returns:
+    Tuple (output, return code)
+  """
+  logging.info("Running %s", command)
+  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                       shell=use_shell, universal_newlines=universal_newlines)
+  if print_output:
+    output_array = []
+    while True:
+      line = p.stdout.readline()
+      if not line:
+        break
+      print line.strip("\n")
+      output_array.append(line)
+    output = "".join(output_array)
+  else:
+    output = p.stdout.read()
+  p.wait()
+  errout = p.stderr.read()
+  if print_output and errout:
+    print >>sys.stderr, errout
+  p.stdout.close()
+  p.stderr.close()
+  return output, p.returncode
+
+
+def RunShell(command, silent_ok=False, universal_newlines=True,
+             print_output=False):
+  data, retcode = RunShellWithReturnCode(command, print_output,
+                                         universal_newlines)
+  if retcode:
+    ErrorExit("Got error status from %s:\n%s" % (command, data))
+  if not silent_ok and not data:
+    ErrorExit("No output from %s" % command)
+  return data
+
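+# Typical call sites, as a sketch: RunShell(["svn", "info"]) returns stdout
+# and aborts via ErrorExit on a non-zero exit code.  Pass silent_ok=True when
+# empty output is legitimate, e.g.
+#   RunShell(["svn", "status"], silent_ok=True)  # a clean tree prints nothing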
+
+class VersionControlSystem(object):
+  """Abstract base class providing an interface to the VCS."""
+
+  def __init__(self, options):
+    """Constructor.
+
+    Args:
+      options: Command line options.
+    """
+    self.options = options
+
+  def GenerateDiff(self, args):
+    """Return the current diff as a string.
+
+    Args:
+      args: Extra arguments to pass to the diff command.
+    """
+    raise NotImplementedError(
+        "abstract method -- subclass %s must override" % self.__class__)
+
+  def GetUnknownFiles(self):
+    """Return a list of files unknown to the VCS."""
+    raise NotImplementedError(
+        "abstract method -- subclass %s must override" % self.__class__)
+
+  def CheckForUnknownFiles(self):
+    """Show an "are you sure?" prompt if there are unknown files."""
+    unknown_files = self.GetUnknownFiles()
+    if unknown_files:
+      print "The following files are not added to version control:"
+      for line in unknown_files:
+        print line
+      prompt = "Are you sure to continue?(y/N) "
+      answer = raw_input(prompt).strip()
+      if answer != "y":
+        ErrorExit("User aborted")
+
+  def GetBaseFile(self, filename):
+    """Get the content of the upstream version of a file.
+
+    Returns:
+      A tuple (base_content, new_content, is_binary, status)
+        base_content: The contents of the base file.
+        new_content: For text files, this is None.  For binary files, this is
+          the contents of the new file, since the diff output won't contain
+          information to reconstruct the current file.
+        is_binary: True iff the file is binary.
+        status: The status of the file.
+    """
+
+    raise NotImplementedError(
+        "abstract method -- subclass %s must override" % self.__class__)
+
+
+  def GetBaseFiles(self, diff):
+    """Helper that calls GetBase file for each file in the patch.
+
+    Returns:
+      A dictionary that maps from filename to GetBaseFile's tuple.  Filenames
+      are retrieved based on lines that start with "Index:" or
+      "Property changes on:".
+    """
+    files = {}
+    for line in diff.splitlines(True):
+      if line.startswith('Index:') or line.startswith('Property changes on:'):
+        unused, filename = line.split(':', 1)
+        # On Windows, if a file has property changes, its filename uses '\'
+        # instead of '/'.
+        filename = filename.strip().replace('\\', '/')
+        files[filename] = self.GetBaseFile(filename)
+    return files
+
+
+  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
+                      files):
+    """Uploads the base files (and if necessary, the current ones as well)."""
+
+    def UploadFile(filename, file_id, content, is_binary, status, is_base):
+      """Uploads a file to the server."""
+      file_too_large = False
+      if is_base:
+        file_type = "base"
+      else:
+        file_type = "current"
+      if len(content) > MAX_UPLOAD_SIZE:
+        print ("Not uploading the %s file for %s because it's too large." %
+               (file_type, filename))
+        file_too_large = True
+        content = ""
+      checksum = md5.new(content).hexdigest()
+      if options.verbose > 0 and not file_too_large:
+        print "Uploading %s file for %s" % (file_type, filename)
+      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
+      form_fields = [("filename", filename),
+                     ("status", status),
+                     ("checksum", checksum),
+                     ("is_binary", str(is_binary)),
+                     ("is_current", str(not is_base)),
+                    ]
+      if file_too_large:
+        form_fields.append(("file_too_large", "1"))
+      if options.email:
+        form_fields.append(("user", options.email))
+      ctype, body = EncodeMultipartFormData(form_fields,
+                                            [("data", filename, content)])
+      response_body = rpc_server.Send(url, body,
+                                      content_type=ctype)
+      if not response_body.startswith("OK"):
+        StatusUpdate("  --> %s" % response_body)
+        sys.exit(1)
+
+    patches = {}
+    for patch_key, filename in patch_list:
+      patches.setdefault(filename, patch_key)
+    for filename in patches.keys():
+      base_content, new_content, is_binary, status = files[filename]
+      file_id_str = patches.get(filename)
+      if file_id_str.find("nobase") != -1:
+        base_content = None
+        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
+      file_id = int(file_id_str)
+      if base_content is not None:
+        UploadFile(filename, file_id, base_content, is_binary, status, True)
+      if new_content is not None:
+        UploadFile(filename, file_id, new_content, is_binary, status, False)
+
+  def IsImage(self, filename):
+    """Returns true if the filename has an image extension."""
+    mimetype = mimetypes.guess_type(filename)[0]
+    if not mimetype:
+      return False
+    return mimetype.startswith("image/")
+
+
+class SubversionVCS(VersionControlSystem):
+  """Implementation of the VersionControlSystem interface for Subversion."""
+
+  def __init__(self, options):
+    super(SubversionVCS, self).__init__(options)
+    if self.options.revision:
+      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
+      if not match:
+        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
+      self.rev_start = match.group(1)
+      self.rev_end = match.group(3)
+    else:
+      self.rev_start = self.rev_end = None
+    # Cache output from "svn list -r REVNO dirname".
+    # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
+    self.svnls_cache = {}
+    # SVN base URL is required to fetch files deleted in an older revision.
+    # Result is cached to not guess it over and over again in GetBaseFile().
+    required = self.options.download_base or self.options.revision is not None
+    self.svn_base = self._GuessBase(required)
+
+  def GuessBase(self, required):
+    """Wrapper for _GuessBase."""
+    return self.svn_base
+
+  def _GuessBase(self, required):
+    """Returns the SVN base URL.
+
+    Args:
+      required: If True, exits if the URL can't be guessed; otherwise None
+        is returned.
+    """
+    info = RunShell(["svn", "info"])
+    for line in info.splitlines():
+      words = line.split()
+      if len(words) == 2 and words[0] == "URL:":
+        url = words[1]
+        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
+        username, netloc = urllib.splituser(netloc)
+        if username:
+          logging.info("Removed username from base URL")
+        if netloc.endswith("svn.python.org"):
+          if netloc == "svn.python.org":
+            if path.startswith("/projects/"):
+              path = path[9:]
+          elif netloc != "pythondev@svn.python.org":
+            ErrorExit("Unrecognized Python URL: %s" % url)
+          base = "http://svn.python.org/view/*checkout*%s/" % path
+          logging.info("Guessed Python base = %s", base)
+        elif netloc.endswith("svn.collab.net"):
+          if path.startswith("/repos/"):
+            path = path[6:]
+          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
+          logging.info("Guessed CollabNet base = %s", base)
+        elif netloc.endswith(".googlecode.com"):
+          path = path + "/"
+          base = urlparse.urlunparse(("http", netloc, path, params,
+                                      query, fragment))
+          logging.info("Guessed Google Code base = %s", base)
+        else:
+          path = path + "/"
+          base = urlparse.urlunparse((scheme, netloc, path, params,
+                                      query, fragment))
+          logging.info("Guessed base = %s", base)
+        return base
+    if required:
+      ErrorExit("Can't find URL in output from svn info")
+    return None
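+
+  # Illustrative: an "svn info" URL of http://src.chromium.org/svn/trunk/src
+  # falls through to the last branch above and yields the base
+  # "http://src.chromium.org/svn/trunk/src/"; *.googlecode.com URLs are
+  # additionally forced to the http scheme.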
+
+  def GenerateDiff(self, args):
+    cmd = ["svn", "diff"]
+    if self.options.revision:
+      cmd += ["-r", self.options.revision]
+    cmd.extend(args)
+    data = RunShell(cmd)
+    count = 0
+    for line in data.splitlines():
+      if line.startswith("Index:") or line.startswith("Property changes on:"):
+        count += 1
+        logging.info(line)
+    if not count:
+      ErrorExit("No valid patches found in output from svn diff")
+    return data
+
+  def _CollapseKeywords(self, content, keyword_str):
+    """Collapses SVN keywords."""
+    # svn cat translates keywords but svn diff doesn't. As a result of this
+    # behavior, patching.PatchChunks() fails with a chunk mismatch error.
+    # This part was originally written by the Review Board development team
+    # who had the same problem (http://reviews.review-board.org/r/276/).
+    # Mapping of keywords to known aliases
+    svn_keywords = {
+      # Standard keywords
+      'Date':                ['Date', 'LastChangedDate'],
+      'Revision':            ['Revision', 'LastChangedRevision', 'Rev'],
+      'Author':              ['Author', 'LastChangedBy'],
+      'HeadURL':             ['HeadURL', 'URL'],
+      'Id':                  ['Id'],
+
+      # Aliases
+      'LastChangedDate':     ['LastChangedDate', 'Date'],
+      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
+      'LastChangedBy':       ['LastChangedBy', 'Author'],
+      'URL':                 ['URL', 'HeadURL'],
+    }
+
+    def repl(m):
+      if m.group(2):
+        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
+      return "$%s$" % m.group(1)
+    keywords = [keyword
+                for name in keyword_str.split(" ")
+                for keyword in svn_keywords.get(name, [])]
+    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
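+
+  # Illustrative: with svn:keywords "Id", the expanded form
+  # "$Id: foo.c 123 bob $" collapses back to "$Id$", while the fixed-width
+  # form "$Id:: ... $" is padded with spaces to keep its width, matching
+  # what svn diff sees.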
+
+  def GetUnknownFiles(self):
+    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
+    unknown_files = []
+    for line in status.split("\n"):
+      if line and line[0] == "?":
+        unknown_files.append(line)
+    return unknown_files
+
+  def ReadFile(self, filename):
+    """Returns the contents of a file."""
+    f = open(filename, 'rb')
+    try:
+      result = f.read()
+    finally:
+      f.close()
+    return result
+
+  def GetStatus(self, filename):
+    """Returns the status of a file."""
+    if not self.options.revision:
+      status = RunShell(["svn", "status", "--ignore-externals", filename])
+      if not status:
+        ErrorExit("svn status returned no output for %s" % filename)
+      status_lines = status.splitlines()
+      # If file is in a cl, the output will begin with
+      # "\n--- Changelist 'cl_name':\n".  See
+      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
+      if (len(status_lines) == 3 and
+          not status_lines[0] and
+          status_lines[1].startswith("--- Changelist")):
+        status = status_lines[2]
+      else:
+        status = status_lines[0]
+    # If we have a revision to diff against we need to run "svn list"
+    # for the old and the new revision and compare the results to get
+    # the correct status for a file.
+    else:
+      dirname, relfilename = os.path.split(filename)
+      if dirname not in self.svnls_cache:
+        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
+        out, returncode = RunShellWithReturnCode(cmd)
+        if returncode:
+          ErrorExit("Failed to get status for %s." % filename)
+        old_files = out.splitlines()
+        args = ["svn", "list"]
+        if self.rev_end:
+          args += ["-r", self.rev_end]
+        cmd = args + [dirname or "."]
+        out, returncode = RunShellWithReturnCode(cmd)
+        if returncode:
+          ErrorExit("Failed to run command %s" % cmd)
+        self.svnls_cache[dirname] = (old_files, out.splitlines())
+      old_files, new_files = self.svnls_cache[dirname]
+      if relfilename in old_files and relfilename not in new_files:
+        status = "D   "
+      elif relfilename in old_files and relfilename in new_files:
+        status = "M   "
+      else:
+        status = "A   "
+    return status
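+
+  # Illustrative, for the revision-range branch above: a file present in the
+  # rev_start listing but missing at rev_end reports "D   "; present in both,
+  # "M   "; otherwise "A   ".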
+
+  def GetBaseFile(self, filename):
+    status = self.GetStatus(filename)
+    base_content = None
+    new_content = None
+
+    # If a file is copied its status will be "A  +", which signifies
+    # "addition-with-history".  See "svn st" for more information.  We need to
+    # upload the original file or else diff parsing will fail if the file was
+    # edited.
+    if status[0] == "A" and status[3] != "+":
+      # We'll need to upload the new content if we're adding a binary file
+      # since diff's output won't contain it.
+      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
+                          silent_ok=True)
+      base_content = ""
+      is_binary = mimetype and not mimetype.startswith("text/")
+      if is_binary and self.IsImage(filename):
+        new_content = self.ReadFile(filename)
+    elif (status[0] in ("M", "D", "R") or
+          (status[0] == "A" and status[3] == "+") or  # Copied file.
+          (status[0] == " " and status[1] == "M")):  # Property change.
+      args = []
+      if self.options.revision:
+        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
+      else:
+        # Don't change filename, it's needed later.
+        url = filename
+        args += ["-r", "BASE"]
+      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
+      mimetype, returncode = RunShellWithReturnCode(cmd)
+      if returncode:
+        # File does not exist in the requested revision.
+        # Reset mimetype, it contains an error message.
+        mimetype = ""
+      get_base = False
+      is_binary = mimetype and not mimetype.startswith("text/")
+      if status[0] == " ":
+        # Empty base content just to force an upload.
+        base_content = ""
+      elif is_binary:
+        if self.IsImage(filename):
+          get_base = True
+          if status[0] == "M":
+            if not self.rev_end:
+              new_content = self.ReadFile(filename)
+            else:
+              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
+              new_content = RunShell(["svn", "cat", url],
+                                     universal_newlines=True, silent_ok=True)
+        else:
+          base_content = ""
+      else:
+        get_base = True
+
+      if get_base:
+        universal_newlines = not is_binary
+        if self.rev_start:
+          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
+          # the full URL with "@REV" appended instead of using "-r" option.
+          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
+          base_content = RunShell(["svn", "cat", url],
+                                  universal_newlines=universal_newlines,
+                                  silent_ok=True)
+        else:
+          base_content = RunShell(["svn", "cat", filename],
+                                  universal_newlines=universal_newlines,
+                                  silent_ok=True)
+        if not is_binary:
+          args = []
+          if self.rev_start:
+            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
+          else:
+            url = filename
+            args += ["-r", "BASE"]
+          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
+          keywords, returncode = RunShellWithReturnCode(cmd)
+          if keywords and not returncode:
+            base_content = self._CollapseKeywords(base_content, keywords)
+    else:
+      StatusUpdate("svn status returned unexpected output: %s" % status)
+      sys.exit(1)
+    return base_content, new_content, is_binary, status[0:5]
+
+
+class GitVCS(VersionControlSystem):
+  """Implementation of the VersionControlSystem interface for Git."""
+
+  def __init__(self, options):
+    super(GitVCS, self).__init__(options)
+    # Map of filename -> hash of base file.
+    self.base_hashes = {}
+
+  def GenerateDiff(self, extra_args):
+    # This is more complicated than svn's GenerateDiff because we must convert
+    # the diff output to include an svn-style "Index:" line as well as record
+    # the hashes of the base files, so we can upload them along with our diff.
+    if self.options.revision:
+      extra_args = [self.options.revision] + extra_args
+    gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
+    svndiff = []
+    filecount = 0
+    filename = None
+    for line in gitdiff.splitlines():
+      match = re.match(r"diff --git a/(.*) b/.*$", line)
+      if match:
+        filecount += 1
+        filename = match.group(1)
+        svndiff.append("Index: %s\n" % filename)
+      else:
+        # The "index" line in a git diff looks like this (long hashes elided):
+        #   index 82c0d44..b2cee3f 100755
+        # We want to save the left hash, as that identifies the base file.
+        match = re.match(r"index (\w+)\.\.", line)
+        if match:
+          self.base_hashes[filename] = match.group(1)
+      svndiff.append(line + "\n")
+    if not filecount:
+      ErrorExit("No valid patches found in output from git diff")
+    return "".join(svndiff)
+
+  def GetUnknownFiles(self):
+    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
+                      silent_ok=True)
+    return status.splitlines()
+
+  def GetBaseFile(self, filename):
+    base_hash = self.base_hashes[filename]
+    base_content = None
+    new_content = None
+    is_binary = False
+    if base_hash == "0" * 40:  # All-zero hash indicates no base file.
+      status = "A"
+      base_content = ""
+    else:
+      status = "M"
+      base_content = RunShell(["git", "show", base_hash])
+    return (base_content, new_content, is_binary, status)
+
+
+class MercurialVCS(VersionControlSystem):
+  """Implementation of the VersionControlSystem interface for Mercurial."""
+
+  def __init__(self, options, repo_dir):
+    super(MercurialVCS, self).__init__(options)
+    # Absolute path to repository (we can be in a subdir)
+    self.repo_dir = os.path.normpath(repo_dir)
+    # Compute the subdir
+    cwd = os.path.normpath(os.getcwd())
+    assert cwd.startswith(self.repo_dir)
+    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
+    if self.options.revision:
+      self.base_rev = self.options.revision
+    else:
+      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
+
+  def _GetRelPath(self, filename):
+    """Get relative path of a file according to the current directory,
+    given its logical path in the repo."""
+    assert filename.startswith(self.subdir), filename
+    return filename[len(self.subdir):].lstrip(r"\/")
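+
+  # e.g. with repo_dir "/src/repo" and cwd "/src/repo/lib",
+  # _GetRelPath("lib/util.py") returns "util.py".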
+
+  def GenerateDiff(self, extra_args):
+    # If no file specified, restrict to the current subdir
+    extra_args = extra_args or ["."]
+    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
+    data = RunShell(cmd, silent_ok=True)
+    svndiff = []
+    filecount = 0
+    for line in data.splitlines():
+      m = re.match("diff --git a/(\S+) b/(\S+)", line)
+      if m:
+        # Modify the line so it looks as if it came from svn diff.
+        # With this modification, no changes on the server side are required
+        # to make upload.py work with Mercurial repos.
+        # NOTE: for proper handling of moved/copied files, we have to use
+        # the second filename.
+        filename = m.group(2)
+        svndiff.append("Index: %s" % filename)
+        svndiff.append("=" * 67)
+        filecount += 1
+        logging.info(line)
+      else:
+        svndiff.append(line)
+    if not filecount:
+      ErrorExit("No valid patches found in output from hg diff")
+    return "\n".join(svndiff) + "\n"
+
+  def GetUnknownFiles(self):
+    """Return a list of files unknown to the VCS."""
+    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
+        silent_ok=True)
+    unknown_files = []
+    for line in status.splitlines():
+      st, fn = line.split(" ", 1)
+      if st == "?":
+        unknown_files.append(fn)
+    return unknown_files
+
+  def GetBaseFile(self, filename):
+    # "hg status" and "hg cat" both take a path relative to the current subdir
+    # rather than to the repo root, but "hg diff" has given us the full path
+    # to the repo root.
+    base_content = ""
+    new_content = None
+    is_binary = False
+    oldrelpath = relpath = self._GetRelPath(filename)
+    # "hg status -C" returns two lines for moved/copied files, one otherwise
+    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
+    out = out.splitlines()
+    # HACK: strip error message about missing file/directory if it isn't in
+    # the working copy
+    if out[0].startswith('%s: ' % relpath):
+      out = out[1:]
+    if len(out) > 1:
+      # Moved/copied => considered as modified, use old filename to
+      # retrieve base contents
+      oldrelpath = out[1].strip()
+      status = "M"
+    else:
+      status, _ = out[0].split(' ', 1)
+    if status != "A":
+      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
+        silent_ok=True)
+      is_binary = "\0" in base_content  # Mercurial's heuristic
+    if status != "R":
+      new_content = open(relpath, "rb").read()
+      is_binary = is_binary or "\0" in new_content
+    if is_binary and base_content:
+      # Fetch again without converting newlines
+      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
+        silent_ok=True, universal_newlines=False)
+    if not is_binary or not self.IsImage(relpath):
+      new_content = None
+    return base_content, new_content, is_binary, status
+
+
+# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
+def SplitPatch(data):
+  """Splits a patch into separate pieces for each file.
+
+  Args:
+    data: A string containing the output of svn diff.
+
+  Returns:
+    A list of 2-tuple (filename, text) where text is the svn diff output
+      pertaining to filename.
+  """
+  patches = []
+  filename = None
+  diff = []
+  for line in data.splitlines(True):
+    new_filename = None
+    if line.startswith('Index:'):
+      unused, new_filename = line.split(':', 1)
+      new_filename = new_filename.strip()
+    elif line.startswith('Property changes on:'):
+      unused, temp_filename = line.split(':', 1)
+      # When a file is modified, paths use '/' between directories; however,
+      # when a property is modified, '\' is used on Windows.  Make them the
+      # same, otherwise the file shows up twice.
+      temp_filename = temp_filename.strip().replace('\\', '/')
+      if temp_filename != filename:
+        # File has property changes but no modifications, create a new diff.
+        new_filename = temp_filename
+    if new_filename:
+      if filename and diff:
+        patches.append((filename, ''.join(diff)))
+      filename = new_filename
+      diff = [line]
+      continue
+    if diff is not None:
+      diff.append(line)
+  if filename and diff:
+    patches.append((filename, ''.join(diff)))
+  return patches
+
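+# Illustrative: a diff touching a.c and then b.c, i.e.
+#   "Index: a.c\n...\nIndex: b.c\n...",
+# splits into [("a.c", "Index: a.c\n..."), ("b.c", "Index: b.c\n...")].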
+
+def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
+  """Uploads a separate patch for each file in the diff output.
+
+  Returns a list of [patch_key, filename] for each file.
+  """
+  patches = SplitPatch(data)
+  rv = []
+  for patch in patches:
+    if len(patch[1]) > MAX_UPLOAD_SIZE:
+      print ("Not uploading the patch for " + patch[0] +
+             " because the file is too large.")
+      continue
+    form_fields = [("filename", patch[0])]
+    if not options.download_base:
+      form_fields.append(("content_upload", "1"))
+    files = [("data", "data.diff", patch[1])]
+    ctype, body = EncodeMultipartFormData(form_fields, files)
+    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
+    print "Uploading patch for " + patch[0]
+    response_body = rpc_server.Send(url, body, content_type=ctype)
+    lines = response_body.splitlines()
+    if not lines or lines[0] != "OK":
+      StatusUpdate("  --> %s" % response_body)
+      sys.exit(1)
+    rv.append([lines[1], patch[0]])
+  return rv
+
+
+def GuessVCS(options):
+  """Helper to guess the version control system.
+
+  This examines the current directory, guesses which VersionControlSystem
+  we're using, and returns an instance of the appropriate class.  Exits with
+  an error if we can't figure it out.
+
+  Returns:
+    A VersionControlSystem instance. Exits if the VCS can't be guessed.
+  """
+  # Mercurial has a command to get the base directory of a repository
+  # Try running it, but don't die if we don't have hg installed.
+  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
+  try:
+    out, returncode = RunShellWithReturnCode(["hg", "root"])
+    if returncode == 0:
+      return MercurialVCS(options, out.strip())
+  except OSError, e:
+    if e.errno != 2:  # ENOENT -- they don't have hg installed.
+      raise
+
+  # Subversion has a .svn in all working directories.
+  if os.path.isdir('.svn'):
+    logging.info("Guessed VCS = Subversion")
+    return SubversionVCS(options)
+
+  # Git has a command to test if you're in a git tree.
+  # Try running it, but don't die if we don't have git installed.
+  try:
+    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
+                                              "--is-inside-work-tree"])
+    if returncode == 0:
+      return GitVCS(options)
+  except OSError, e:
+    if e.errno != 2:  # ENOENT -- they don't have git installed.
+      raise
+
+  ErrorExit(("Could not guess version control system. "
+             "Are you in a working copy directory?"))
+
+
+def RealMain(argv, data=None):
+  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
+                              "%(lineno)s %(message)s "))
+  os.environ['LC_ALL'] = 'C'
+  options, args = parser.parse_args(argv[1:])
+  global verbosity
+  verbosity = options.verbose
+  if verbosity >= 3:
+    logging.getLogger().setLevel(logging.DEBUG)
+  elif verbosity >= 2:
+    logging.getLogger().setLevel(logging.INFO)
+  vcs = GuessVCS(options)
+  if isinstance(vcs, SubversionVCS):
+    # base field is only allowed for Subversion.
+    # Note: Fetching base files may become deprecated in future releases.
+    base = vcs.GuessBase(options.download_base)
+  else:
+    base = None
+  if not base and options.download_base:
+    # Without a base URL the server can't fetch base files itself, so fall
+    # back to uploading them from the client.
+    options.download_base = False
+    logging.info("Enabled upload of base files")
+  if not options.assume_yes:
+    vcs.CheckForUnknownFiles()
+  if data is None:
+    data = vcs.GenerateDiff(args)
+  files = vcs.GetBaseFiles(data)
+  if verbosity >= 1:
+    print "Upload server:", options.server, "(change with -s/--server)"
+  if options.issue:
+    prompt = "Message describing this patch set: "
+  else:
+    prompt = "New issue subject: "
+  message = options.message or raw_input(prompt).strip()
+  if not message:
+    ErrorExit("A non-empty message is required")
+  rpc_server = GetRpcServer(options)
+  form_fields = [("subject", message)]
+  if base:
+    form_fields.append(("base", base))
+  if options.issue:
+    form_fields.append(("issue", str(options.issue)))
+  if options.email:
+    form_fields.append(("user", options.email))
+  if options.reviewers:
+    for reviewer in options.reviewers.split(','):
+      if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
+        ErrorExit("Invalid email address: %s" % reviewer)
+    form_fields.append(("reviewers", options.reviewers))
+  if options.cc:
+    for cc in options.cc.split(','):
+      if "@" in cc and not cc.split("@")[1].count(".") == 1:
+        ErrorExit("Invalid email address: %s" % cc)
+    form_fields.append(("cc", options.cc))
+  description = options.description
+  if options.description_file:
+    if options.description:
+      ErrorExit("Can't specify description and description_file")
+    f = open(options.description_file, 'r')
+    description = f.read()
+    f.close()
+  if description:
+    form_fields.append(("description", description))
+  # Send a hash of all the base files so the server can determine if a copy
+  # already exists in an earlier patchset.
+  base_hashes = ""
+  for filename, info in files.iteritems():
+    if info[0] is not None:
+      checksum = md5.new(info[0]).hexdigest()
+      if base_hashes:
+        base_hashes += "|"
+      base_hashes += checksum + ":" + filename
+  form_fields.append(("base_hashes", base_hashes))
+  # If we're uploading base files, don't send the email before the uploads, so
+  # that it contains the file status.
+  if options.send_mail and options.download_base:
+    form_fields.append(("send_mail", "1"))
+  if not options.download_base:
+    form_fields.append(("content_upload", "1"))
+  if len(data) > MAX_UPLOAD_SIZE:
+    print "Patch is large, so uploading file patches separately."
+    uploaded_diff_file = []
+    form_fields.append(("separate_patches", "1"))
+  else:
+    uploaded_diff_file = [("data", "data.diff", data)]
+  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
+  response_body = rpc_server.Send("/upload", body, content_type=ctype)
+  if not options.download_base or not uploaded_diff_file:
+    lines = response_body.splitlines()
+    if len(lines) >= 2:
+      msg = lines[0]
+      patchset = lines[1].strip()
+      patches = [x.split(" ", 1) for x in lines[2:]]
+    else:
+      msg = response_body
+  else:
+    msg = response_body
+  StatusUpdate(msg)
+  if not (response_body.startswith("Issue created.") or
+          response_body.startswith("Issue updated.")):
+    sys.exit(0)
+  issue = msg[msg.rfind("/")+1:]
+
+  if not uploaded_diff_file:
+    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
+    if not options.download_base:
+      patches = result
+
+  if not options.download_base:
+    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
+    if options.send_mail:
+      rpc_server.Send("/" + issue + "/mail", payload="")
+  return issue, patchset
+
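+# A hedged sketch of programmatic use: the data argument lets another script
+# supply a precomputed diff instead of having the VCS generate one, e.g.
+#   issue, patchset = upload.RealMain(["upload.py", "-s", server], diff_text)
+# where "upload" is this module's import name and diff_text a diff string.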
+
+def main():
+  try:
+    RealMain(sys.argv)
+  except KeyboardInterrupt:
+    print
+    StatusUpdate("Interrupted.")
+    sys.exit(1)
+
+
+if __name__ == "__main__":
+  main()