Clone your starred GitHub repositories via Node.js script

GitHub API makes it easy to access lists of things, such as starred repositories which is used in this example.

The idea is to clone all those repositories which the current user (me) has starred, thus marking them as something to keep an eye on. To accomplish this, some steps are needed:

  1. Get list of starred repositories
  2. Clone each of them

Not that many steps after all...

Get a list of starred repositories

As stated in the GitHub API documentation, the following request fetches the starred repositories of the currently authenticated user.

GET /user/starred

Thus, with password-based Basic Authentication, it could be used like this:

curl -u paazmaya https://api.github.com/user/starred \
    -o github-paazmaya-starred.json

In order to get your own stars, substitute paazmaya with your own GitHub username. The results are saved in a JSON file called github-paazmaya-starred.json.

The resulting JSON is an array, in which each object represents a given starred repository. By default they are ordered descending, meaning that the most recently starred repository will be listed on the top.

For example, today I starred FontCustom/fontcustom, thus it is the first item. A snippet is shown below.

    "id": 6118575,
    "name": "fontcustom",
    "full_name": "FontCustom/fontcustom",
    "owner": {
      "login": "FontCustom",
      "id": 3375656,
      "avatar_url": "",
      "gravatar_id": null,
      "url": "",
      "html_url": "",
      "followers_url": "",
      "following_url": "{/other_user}",
      "gists_url": "{/gist_id}",
      "starred_url": "{/owner}{/repo}",
      "subscriptions_url": "",
      "organizations_url": "",
      "repos_url": "",
      "events_url": "{/privacy}",
      "received_events_url": "",
      "type": "Organization",
      "site_admin": false
    },
    "private": false,
    "html_url": "",
    "description": "Generate custom icon webfonts from the comfort of the command line.",
    "fork": false,
    "url": "",
    "forks_url": "",
    "keys_url": "{/key_id}",
    "collaborators_url": "{/collaborator}",
    "teams_url": "",
    "hooks_url": "",
    "issue_events_url": "{/number}",
    "events_url": "",
    "assignees_url": "{/user}",
    "branches_url": "{/branch}",
    "tags_url": "",
    "blobs_url": "{/sha}",
    "git_tags_url": "{/sha}",
    "git_refs_url": "{/sha}",
    "trees_url": "{/sha}",
    "statuses_url": "{sha}",
    "languages_url": "",
    "stargazers_url": "",
    "contributors_url": "",
    "subscribers_url": "",
    "subscription_url": "",
    "commits_url": "{/sha}",
    "git_commits_url": "{/sha}",
    "comments_url": "{/number}",
    "issue_comment_url": "{number}",
    "contents_url": "{+path}",
    "compare_url": "{base}...{head}",
    "merges_url": "",
    "archive_url": "{archive_format}{/ref}",
    "downloads_url": "",
    "issues_url": "{/number}",
    "pulls_url": "{/number}",
    "milestones_url": "{/number}",
    "notifications_url": "{?since,all,participating}",
    "labels_url": "{/name}",
    "releases_url": "{/id}",
    "created_at": "2012-10-08T02:14:42Z",
    "updated_at": "2014-02-20T11:14:40Z",
    "pushed_at": "2014-02-20T11:14:40Z",
    "git_url": "git://github.com/FontCustom/fontcustom.git",
    "ssh_url": "git@github.com:FontCustom/fontcustom.git",
    "clone_url": "https://github.com/FontCustom/fontcustom.git",
    "svn_url": "https://github.com/FontCustom/fontcustom",
    "homepage": "",
    "size": 2707,
    "stargazers_count": 1742,
    "watchers_count": 1742,
    "language": "Ruby",
    "has_issues": true,
    "has_downloads": true,
    "has_wiki": true,
    "forks_count": 188,
    "mirror_url": null,
    "open_issues_count": 32,
    "forks": 188,
    "open_issues": 32,
    "watchers": 1742,
    "default_branch": "master",
    "master_branch": "master",
    "permissions": {
      "admin": false,
      "push": false,
      "pull": true
    }
  },
  {
    "id": 14489011,
    "name": "grunt-font-optimizer",
    "full_name": "ActivearkJWT/grunt-font-optimizer",


The properties git_url and clone_url are of interest, as they are the ones mainly used for cloning.

One way to find all of them would be to use a regular expression, another to parse the file with the native JavaScript method JSON.parse() via Node.js.

The regular expression approach for finding all lines that have the required clone_url:

/"clone_url": "(\S+)"/g

A minimal Node.js script could look like, calling it clone-starred.js:

// Read the saved JSON file of starred repositories and print
// the HTTPS clone URL of each one.
var fs = require('fs');

// The file was produced by the earlier curl request.
var raw = fs.readFileSync('github-paazmaya-starred.json', { encoding: 'utf8' });
var data = JSON.parse(raw); // an array of repository objects

data.forEach(function (item) {
  // clone_url is the HTTPS address usable by "git clone".
  console.log(item.clone_url);
});

The output of running node clone-starred.js could be something similar to:

https://github.com/FontCustom/fontcustom.git
https://github.com/ActivearkJWT/grunt-font-optimizer.git
...

Now this lists only some of the hundreds of repositories that I have starred during the last four years, so what could be the problem?

Due to pagination, the default request returns only the first 30 items. The maximum number of items is 100, if the per_page parameter is used.

The original request can be rewritten to handle pagination. Please note that the URL should be quoted in order to avoid command line confusion:

curl -u paazmaya "https://api.github.com/user/starred?per_page=100&page=1" \
    -o github-paazmaya-starred-1.json

This would give the first 100 items and by incrementing the page parameter additional results can be retrieved.

How to get the total amount of pages available?

You could just keep incrementing the page parameter until the result becomes empty, but there exists another solution. The returned headers contain links of which one points to the last page.

By adding the -i parameter to the curl command, it will include the incoming header information in the resulting output file. However this would break the JSON parsing method. Another parameter for handling incoming headers is -D which saves them in a different file.

Yet another update to the command line:

curl -u paazmaya "https://api.github.com/user/starred?per_page=100&page=1" \
    -o github-paazmaya-starred-1.json -D headers.txt

The headers could look something like the following, stating that there are three pages available:

Link: <https://api.github.com/user/starred?per_page=100&page=2>; rel="next",
  <https://api.github.com/user/starred?per_page=100&page=3>; rel="last",
  <https://api.github.com/user/starred?per_page=100&page=1>; rel="first",
  <https://api.github.com/user/starred?per_page=100&page=1>; rel="prev"

In case your results are already in the last page, for example you have less than 100 stars, the next and last links are missing.

Clone them

Since all the JSON data has been fetched, the previously created Node.js script can be used as a base to trigger Git client for cloning.

Steps that need to be taken into account:

  • Multiple result pages need to be combined
  • Cloning simultaneously would use too many resources, thus only one cloning process should run at a time

The previously used script would become, in complete form:

// Combine the paginated JSON result files, then clone every starred
// repository one at a time (parallel clones would hog resources).
var fs = require('fs');

var pageCount = 3; // number of result pages fetched with curl
var cloneUrls = [];

// Gather the clone URLs from every page file.
for (var i = 1; i <= pageCount; ++i) {
  var raw = fs.readFileSync(
    'github-paazmaya-starred-' + i + '.json',
    { encoding: 'utf8' }
  );
  var data = JSON.parse(raw);
  data.forEach(function (item) {
    cloneUrls.push(item.clone_url);
  });
}

console.log('Amount of starred repositories: ' + cloneUrls.length);

var exec = require('child_process').exec;

// Clone the repository at the given index, and once the git process
// has finished, recurse to the next one. This keeps exactly one
// cloning process running at a time.
var next = function (index) {
  if (index >= cloneUrls.length) {
    return; // all done
  }
  var url = cloneUrls[index];
  console.log(index + '\t' + url);

  exec('git clone ' + url, function (error, stdout, stderr) {
    console.log('stdout: ' + stdout);
    console.log('stderr: ' + stderr);
    if (error !== null) {
      console.log('exec error: ' + error);
    }
    next(index + 1);
  });
};

next(0);

After running the above script, the current folder should be filled with clones of your starred repositories, assuming git was found in the path.