[dev.ssa] Merge remote-tracking branch 'origin/master' into mergebranch

Conflicts:
	src/cmd/compile/internal/gc/racewalk.go
	src/cmd/internal/obj/stack.go
	src/cmd/internal/obj/x86/obj6.go
	src/runtime/stack.go
	test/nilptr3.go
	test/nosplit.go

Change-Id: Ie6053eb1577fd73e8243651f25c0f1fc765ae660
Author: Keith Randall
Date:   2015-11-16 13:20:16 -08:00
Commit: 4304fbc4d0
749 changed files with 47580 additions and 17149 deletions

.gitignore (2 lines changed)

@ -29,7 +29,7 @@ src/cmd/cgo/zdefaultcc.go
src/cmd/go/zdefaultcc.go
src/cmd/internal/obj/zbootstrap.go
src/go/doc/headscan
src/runtime/zversion.go
src/runtime/internal/sys/zversion.go
src/unicode/maketables
src/*.*/
test/pass.out

AUTHORS (49 lines changed)

@ -2,9 +2,11 @@
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# Names should be added to this file as one of
# Organization's name
# Individual's name <submission email address>
# Individual's name <submission email address> <email2> <emailN>
# See CONTRIBUTORS for the meaning of multiple email addresses.
# Please keep the list sorted.
@ -22,6 +24,7 @@ Ainar Garipov <gugl.zadolbal@gmail.com>
Akshat Kumar <seed@mail.nanosouffle.net>
Alan Shreve <alan@inconshreveable.com>
Albert Strasheim <fullung@gmail.com>
Alberto Bertogli <albertito@blitiri.com.ar>
Alberto Donizetti <alb.donizetti@gmail.com>
Alberto García Hierro <alberto@garciahierro.com> <alberto.garcia.hierro@gmail.com>
Aleksandar Dezelin <dezelin@gmail.com>
@ -31,6 +34,7 @@ Alex Jin <toalexjin@gmail.com>
Alex Plugaru <alex@plugaru.org> <alexandru.plugaru@gmail.com>
Alex Schroeder <alex@gnu.org>
Alex Sergeyev <abc@alexsergeyev.com>
Alexander Demakin <alexander.demakin@gmail.com>
Alexander Larsson <alexander.larsson@gmail.com>
Alexander Morozov <lk4d4math@gmail.com>
Alexander Neumann <alexander@bumpern.de>
@ -68,9 +72,11 @@ Andriy Lytvynov <lytvynov.a.v@gmail.com>
Andy Davis <andy@bigandian.com>
Andy Maloney <asmaloney@gmail.com>
Anfernee Yongkun Gui <anfernee.gui@gmail.com>
Angelo Bulfone <mbulfone@gmail.com>
Anh Hai Trinh <anh.hai.trinh@gmail.com>
Anmol Sethi <anmol@aubble.com>
Anschel Schaffer-Cohen <anschelsc@gmail.com>
Anthony Canino <anthony.canino1@gmail.com>
Anthony Eufemio <anthony.eufemio@gmail.com>
Anthony Martin <ality@pbrane.org>
Anthony Starks <ajstarks@gmail.com>
@ -79,6 +85,7 @@ Aram Hăvărneanu <aram@mgk.ro>
Areski Belaid <areski@gmail.com>
Arnaud Ysmal <arnaud.ysmal@gmail.com>
Arne Hormann <arnehormann@gmail.com>
Arnout Engelen <arnout@bzzt.net>
Aron Nopanen <aron.nopanen@gmail.com>
Artyom Pervukhin <artyom.pervukhin@gmail.com>
Arvindh Rajesh Tamilmani <art@a-30.net>
@ -142,6 +149,7 @@ CoreOS, Inc.
Corey Thomasson <cthom.lists@gmail.com>
Cristian Staretu <unclejacksons@gmail.com>
Damian Gryski <dgryski@gmail.com>
Dan Caddigan <goldcaddy77@gmail.com>
Dan Callahan <dan.callahan@gmail.com>
Dan Peterson <dpiddy@gmail.com>
Dan Sinclair <dan.sinclair@gmail.com>
@ -152,6 +160,7 @@ Daniel Krech <eikeon@eikeon.com>
Daniel Lidén <daniel.liden.87@gmail.com>
Daniel Morsing <daniel.morsing@gmail.com>
Daniel Ortiz Pereira da Silva <daniel.particular@gmail.com>
Daniel Skinner <daniel@dasa.cc>
Daniel Theophanes <kardianos@gmail.com>
Darren Elwood <darren@textnode.com>
Dave Cheney <dave@cheney.net>
@ -160,6 +169,7 @@ David Calavera <david.calavera@gmail.com>
David du Colombier <0intro@gmail.com>
David Forsythe <dforsythe@gmail.com>
David G. Andersen <dave.andersen@gmail.com>
David Howden <dhowden@gmail.com>
David Jakob Fritz <david.jakob.fritz@gmail.com>
David Leon Gil <coruus@gmail.com>
David R. Jenni <david.r.jenni@gmail.com>
@ -180,6 +190,7 @@ Dmitri Shuralyov <shurcooL@gmail.com>
Dmitriy Shelenin <deemok@googlemail.com> <deemok@gmail.com>
Dmitry Chestnykh <dchest@gmail.com>
Dmitry Savintsev <dsavints@gmail.com>
Dmitry Yakunin <nonamezeil@gmail.com>
Dominik Honnef <dominik.honnef@gmail.com>
Donald Huang <don.hcd@gmail.com>
Donovan Hide <donovanhide@gmail.com>
@ -194,12 +205,14 @@ Ehren Kret <ehren.kret@gmail.com>
Eivind Uggedal <eivind@uggedal.com>
Elias Naur <elias.naur@gmail.com>
Emil Hessman <c.emil.hessman@gmail.com> <emil@hessman.se>
Emmanuel Odeke <emm.odeke@gmail.com> <odeke@ualberta.ca>
Eoghan Sherry <ejsherry@gmail.com>
Eric Clark <zerohp@gmail.com>
Eric Lagergren <ericscottlagergren@gmail.com>
Eric Milliken <emilliken@gmail.com>
Eric Roshan-Eisner <eric.d.eisner@gmail.com>
Erik Aigner <aigner.erik@gmail.com>
Erik Dubbelboer <erik@dubbelboer.com>
Erik St. Martin <alakriti@gmail.com>
Erik Westrup <erik.westrup@gmail.com>
Esko Luontola <esko.luontola@gmail.com>
@ -217,12 +230,15 @@ Firmansyah Adiputra <frm.adiputra@gmail.com>
Florian Uekermann <florian@uekermann-online.de>
Florian Weimer <fw@deneb.enyo.de>
Florin Patan <florinpatan@gmail.com>
Ford Hurley <ford.hurley@gmail.com>
Francisco Claude <fclaude@recoded.cl>
Francisco Souza <franciscossouza@gmail.com>
Frederick Kelly Mayle III <frederickmayle@gmail.com>
Fredrik Enestad <fredrik.enestad@soundtrackyourbrand.com>
Frithjof Schulze <schulze@math.uni-hannover.de> <sfrithjof@gmail.com>
Gabriel Aszalos <gabriel.aszalos@gmail.com>
Gary Burd <gary@beagledreams.com>
Gaurish Sharma <contact@gaurishsharma.com>
Gautham Thambidorai <gautham.dorai@gmail.com>
Geert-Johan Riemer <gjr19912@gmail.com>
Georg Reinke <guelfey@gmail.com>
@ -252,7 +268,9 @@ Henning Schmiedehausen <henning@schmiedehausen.org>
Henrik Edwards <henrik.edwards@gmail.com>
Herbert Georg Fischer <herbert.fischer@gmail.com>
Hong Ruiqi <hongruiqi@gmail.com>
Hsin-Ho Yeh <yhh92u@gmail.com>
Hu Keping <hukeping@huawei.com>
Ian Gudger <ian@loosescre.ws>
IBM
Icarus Sparry <golang@icarus.freeuk.com>
Igneous Systems, Inc.
@ -306,6 +324,7 @@ John Asmuth <jasmuth@gmail.com>
John C Barstow <jbowtie@amathaine.com>
John Graham-Cumming <jgc@jgc.org> <jgrahamc@gmail.com>
John Howard Palevich <jack.palevich@gmail.com>
John Jenkins <twodopeshaggy@gmail.com>
John Potocny <johnp@vividcortex.com>
John Shahid <jvshahid@gmail.com>
John Tuley <john@tuley.org>
@ -339,7 +358,9 @@ Kelvin Foo Chuan Lyi <vmirage@gmail.com>
Ken Friedenbach <kenliz@cruzio.com>
Ken Rockot <ken@oz.gs>
Ken Sedgwick <ken@bonsai.com>
Kenny Grant <kennygrant@gmail.com>
Kevin Ballard <kevin@sb.org>
Klaus Post <klauspost@gmail.com>
Konstantin Shaposhnikov <k.shaposhnikov@gmail.com>
KPCompass, Inc.
Kristopher Watts <traetox@gmail.com>
@ -351,6 +372,7 @@ L Campbell <unpantsu@gmail.com>
Lai Jiangshan <eag0628@gmail.com>
Larz Conwell <larzconwell@gmail.com>
Lee Packham <lpackham@gmail.com>
Lewin Bormann <lewin.bormann@gmail.com>
Liberty Fund Inc
Linaro Limited
Lloyd Dewolf <foolswisdom@gmail.com>
@ -362,6 +384,7 @@ Luit van Drongelen <luitvd@gmail.com>
Luka Zakrajšek <tr00.g33k@gmail.com>
Luke Curley <qpingu@gmail.com>
Mal Curtis <mal@mal.co.nz>
Manu S Ajith <neo@codingarena.in>
Manuel Mendez <mmendez534@gmail.com>
Marc Weistroff <marc@weistroff.net>
Marco Hennings <marco.hennings@freiheit.com>
@ -383,6 +406,7 @@ Mathieu Lonjaret <mathieu.lonjaret@gmail.com>
Mats Lidell <mats.lidell@cag.se>
Matt Aimonetti <mattaimonetti@gmail.com>
Matt Bostock <matt@mattbostock.com>
Matt Drollette <matt@drollette.com>
Matt Jibson <matt.jibson@gmail.com>
Matt Joiner <anacrolix@gmail.com>
Matt Layher <mdlayher@gmail.com>
@ -395,7 +419,9 @@ Matthew Holt <Matthew.Holt+git@gmail.com>
Matthew Horsnell <matthew.horsnell@gmail.com>
Maxim Khitrov <max@mxcrypt.com>
Meir Fischer <meirfischer@gmail.com>
Meng Zhuo <mengzhuo1203@gmail.com>
Meteor Development Group
Mhd Sulhan <m.shulhan@gmail.com>
Micah Stetson <micah.stetson@gmail.com>
Michael Chaten <mchaten@gmail.com>
Michael Elkins <michael.elkins@gmail.com>
@ -422,12 +448,15 @@ Miki Tebeka <miki.tebeka@gmail.com>
Mikio Hara <mikioh.mikioh@gmail.com>
Mikkel Krautz <mikkel@krautz.dk>
Miquel Sabaté Solà <mikisabate@gmail.com>
Mohit Agarwal <mohit@sdf.org>
Moov Corporation
Moriyoshi Koizumi <mozo@mozo.jp>
Môshe van der Sterre <moshevds@gmail.com>
Nan Deng <monnand@gmail.com>
Nathan John Youngman <nj@nathany.com>
Nathan Otterness <otternes@cs.unc.edu>
Nathan P Finch <nate.finch@gmail.com>
Nathan VanBenschoten <nvanbenschoten@gmail.com>
Nathan Youngman <git@nathany.com>
Neelesh Chandola <neelesh.c98@gmail.com>
Nevins Bartolomeo <nevins.bartolomeo@gmail.com>
@ -465,10 +494,12 @@ Patrick Smith <pat42smith@gmail.com>
Paul A Querna <paul.querna@gmail.com>
Paul Hammond <paul@paulhammond.org>
Paul Lalonde <paul.a.lalonde@gmail.com>
Paul Meyer <paul.meyer@microsoft.com>
Paul Rosania <paul.rosania@gmail.com>
Paul Sbarra <Sbarra.Paul@gmail.com>
Paul Smith <paulsmith@pobox.com> <paulsmith@gmail.com>
Paul van Brouwershaven <paul@vanbrouwershaven.com>
Pavel Paulau <pavel.paulau@gmail.com>
Pavel Zinovkin <pavel.zinovkin@gmail.com>
Pawel Knap <pawelknap88@gmail.com>
Percy Wegmann <ox.to.a.cart@gmail.com>
@ -491,8 +522,10 @@ Pietro Gagliardi <pietro10@mac.com>
Preetam Jinka <pj@preet.am>
Quan Yong Zhai <qyzhai@gmail.com>
Quoc-Viet Nguyen <afelion@gmail.com>
RackTop Systems Inc.
Raif S. Naffah <go@naffah-raif.name>
Rajat Goel <rajat.goel2010@gmail.com>
Ralph Corderoy <ralph@inputplus.co.uk>
Red Hat, Inc.
Reinaldo de Souza Jr <juniorz@gmail.com>
Rémy Oudompheng <oudomphe@phare.normalesup.org>
@ -502,6 +535,7 @@ Richard Eric Gavaletz <gavaletz@gmail.com>
Richard Musiol <mail@richard-musiol.de>
Rick Arnold <rickarnoldjr@gmail.com>
Risto Jaakko Saarelma <rsaarelm@gmail.com>
Rob Norman <rob.norman@infinitycloud.com>
Robert Daniel Kortschak <dan.kortschak@adelaide.edu.au>
Robert Dinu <r@varp.se>
Robert Figueiredo <robfig@gmail.com>
@ -528,11 +562,12 @@ Sanjay Menakuru <balasanjay@gmail.com>
Scott Barron <scott.barron@github.com>
Scott Ferguson <scottwferg@gmail.com>
Scott Lawrence <bytbox@gmail.com>
Sebastien Binet <seb.binet@gmail.com>
Sebastien Binet <seb.binet@gmail.com>
Sébastien Paolacci <sebastien.paolacci@gmail.com>
Sergei Skorobogatov <skorobo@rambler.ru>
Sergey 'SnakE' Gromov <snake.scaly@gmail.com>
Sergio Luis O. B. Correia <sergio@correia.cc>
Seth Hoenig <seth.a.hoenig@gmail.com>
Shane Hansen <shanemhansen@gmail.com>
Shaozhen Ding <dsz0111@gmail.com>
Shawn Smith <shawn.p.smith@gmail.com>
@ -541,6 +576,7 @@ Shivakumar GN <shivakumar.gn@gmail.com>
Silvan Jegen <s.jegen@gmail.com>
Simon Whitehead <chemnova@gmail.com>
Sokolov Yura <funny.falcon@gmail.com>
Spencer Nelson <s@spenczar.com>
Spring Mc <heresy.mc@gmail.com>
Square, Inc.
StalkR <stalkr@stalkr.net>
@ -550,6 +586,7 @@ Stéphane Travostino <stephane.travostino@gmail.com>
Stephen McQuay <stephen@mcquay.me>
Stephen Weinberg <stephen@q5comm.com>
Steve McCoy <mccoyst@gmail.com>
Steve Phillips <elimisteve@gmail.com>
Steve Streeting <steve@stevestreeting.com>
Steven Elliot Harris <seharris@gmail.com>
Steven Hartland <steven.hartland@multiplay.co.uk>
@ -568,6 +605,7 @@ Thiago Fransosi Farina <thiago.farina@gmail.com>
Thomas Alan Copeland <talan.copeland@gmail.com>
Thomas Desrosiers <thomasdesr@gmail.com>
Thomas Kappler <tkappler@gmail.com>
Thorben Krueger <thorben.krueger@gmail.com>
Tim Cooijmans <timcooijmans@gmail.com>
Timo Savola <timo.savola@gmail.com>
Timo Truyts <alkaloid.btx@gmail.com>
@ -577,6 +615,7 @@ Tom Heng <zhm20070928@gmail.com>
Tom Linford <tomlinford@gmail.com>
Tommy Schaefer <tommy.schaefer@teecom.com>
Tor Andersson <tor.andersson@gmail.com>
Tormod Erevik Lea <tormodlea@gmail.com>
Totoro W <tw19881113@gmail.com>
Travis Cline <travis.cline@gmail.com>
Trey Tacon <ttacon@gmail.com>
@ -601,6 +640,8 @@ William Orr <will@worrbase.com> <ay1244@gmail.com>
Xia Bin <snyh@snyh.org>
Xing Xing <mikespook@gmail.com>
Yann Kerhervé <yann.kerherve@gmail.com>
Yao Zhang <lunaria21@gmail.com>
Yasuharu Goto <matope.ono@gmail.com>
Yasuhiro Matsumoto <mattn.jp@gmail.com>
Yesudeep Mangalapilly <yesudeep@google.com>
Yissakhar Z. Beck <yissakhar.beck@gmail.com>

View File

@ -22,12 +22,13 @@
# individual or corporate CLA was used.
# Names should be added to this file like so:
# Name <email address>
# Individual's name <submission email address>
# Individual's name <submission email address> <email2> <emailN>
#
# An entry with two email addresses specifies that the
# An entry with multiple email addresses specifies that the
# first address should be used in the submit logs and
# that the second address should be recognized as the
# same person when interacting with Rietveld.
# that the other addresses should be recognized as the
# same person when interacting with Gerrit.
# Please keep the list sorted.
@ -48,6 +49,7 @@ Akshat Kumar <seed@mail.nanosouffle.net>
Alan Donovan <adonovan@google.com>
Alan Shreve <alan@inconshreveable.com>
Albert Strasheim <fullung@gmail.com>
Alberto Bertogli <albertito@blitiri.com.ar>
Alberto Donizetti <alb.donizetti@gmail.com>
Alberto García Hierro <alberto@garciahierro.com> <alberto.garcia.hierro@gmail.com>
Aleksandar Dezelin <dezelin@gmail.com>
@ -58,6 +60,7 @@ Alex Jin <toalexjin@gmail.com>
Alex Plugaru <alex@plugaru.org> <alexandru.plugaru@gmail.com>
Alex Schroeder <alex@gnu.org>
Alex Sergeyev <abc@alexsergeyev.com>
Alexander Demakin <alexander.demakin@gmail.com>
Alexander Larsson <alexander.larsson@gmail.com>
Alexander Morozov <lk4d4math@gmail.com>
Alexander Neumann <alexander@bumpern.de>
@ -102,9 +105,11 @@ Andriy Lytvynov <lytvynov.a.v@gmail.com>
Andy Davis <andy@bigandian.com>
Andy Maloney <asmaloney@gmail.com>
Anfernee Yongkun Gui <anfernee.gui@gmail.com>
Angelo Bulfone <mbulfone@gmail.com>
Anh Hai Trinh <anh.hai.trinh@gmail.com>
Anmol Sethi <anmol@aubble.com>
Anschel Schaffer-Cohen <anschelsc@gmail.com>
Anthony Canino <anthony.canino1@gmail.com>
Anthony Eufemio <anthony.eufemio@gmail.com>
Anthony Martin <ality@pbrane.org>
Anthony Starks <ajstarks@gmail.com>
@ -113,6 +118,7 @@ Aram Hăvărneanu <aram@mgk.ro>
Areski Belaid <areski@gmail.com>
Arnaud Ysmal <arnaud.ysmal@gmail.com>
Arne Hormann <arnehormann@gmail.com>
Arnout Engelen <arnout@bzzt.net>
Aron Nopanen <aron.nopanen@gmail.com>
Artyom Pervukhin <artyom.pervukhin@gmail.com>
Arvindh Rajesh Tamilmani <art@a-30.net>
@ -167,6 +173,7 @@ Cary Hull <chull@google.com>
Case Nelson <case.nelson@gmail.com>
Casey Marshall <casey.marshall@gmail.com>
Catalin Patulea <catalinp@google.com>
Cedric Staub <cs@squareup.com>
Cezar Sá Espinola <cezarsa@gmail.com>
ChaiShushan <chaishushan@gmail.com>
Charles L. Dorian <cldorian@gmail.com>
@ -201,7 +208,9 @@ Cosmos Nicolaou <cnicolaou@google.com>
Cristian Staretu <unclejacksons@gmail.com>
Damian Gryski <dgryski@gmail.com>
Damien Neil <dneil@google.com>
Dan Caddigan <goldcaddy77@gmail.com>
Dan Callahan <dan.callahan@gmail.com>
Dan Jacques <dnj@google.com>
Dan Peterson <dpiddy@gmail.com>
Dan Pupius <dan@medium.com>
Dan Sinclair <dan.sinclair@gmail.com>
@ -213,6 +222,7 @@ Daniel Lidén <daniel.liden.87@gmail.com>
Daniel Morsing <daniel.morsing@gmail.com>
Daniel Nadasi <dnadasi@google.com>
Daniel Ortiz Pereira da Silva <daniel.particular@gmail.com>
Daniel Skinner <daniel@dasa.cc>
Daniel Theophanes <kardianos@gmail.com>
Darren Elwood <darren@textnode.com>
Dave Borowitz <dborowitz@google.com>
@ -231,6 +241,7 @@ David du Colombier <0intro@gmail.com>
David Forsythe <dforsythe@gmail.com>
David G. Andersen <dave.andersen@gmail.com>
David Glasser <glasser@meteor.com>
David Howden <dhowden@gmail.com>
David Jakob Fritz <david.jakob.fritz@gmail.com>
David Leon Gil <coruus@gmail.com>
David McLeish <davemc@google.com>
@ -254,6 +265,7 @@ Dmitriy Shelenin <deemok@googlemail.com> <deemok@gmail.com>
Dmitriy Vyukov <dvyukov@google.com>
Dmitry Chestnykh <dchest@gmail.com>
Dmitry Savintsev <dsavints@gmail.com>
Dmitry Yakunin <nonamezeil@gmail.com>
Dominik Honnef <dominik.honnef@gmail.com>
Dominik Vogt <vogt@linux.vnet.ibm.com>
Donald Huang <don.hcd@gmail.com>
@ -270,16 +282,20 @@ Ehren Kret <ehren.kret@gmail.com>
Eivind Uggedal <eivind@uggedal.com>
Elias Naur <elias.naur@gmail.com>
Emil Hessman <c.emil.hessman@gmail.com> <emil@hessman.se>
Emmanuel Odeke <emm.odeke@gmail.com> <odeke@ualberta.ca>
Eoghan Sherry <ejsherry@gmail.com>
Eric Clark <zerohp@gmail.com>
Eric Garrido <ekg@google.com>
Eric Koleda <ekoleda+devrel@google.com>
Eric Lagergren <ericscottlagergren@gmail.com>
Eric Milliken <emilliken@gmail.com>
Eric Roshan-Eisner <eric.d.eisner@gmail.com>
Erik Aigner <aigner.erik@gmail.com>
Erik Dubbelboer <erik@dubbelboer.com>
Erik St. Martin <alakriti@gmail.com>
Erik Westrup <erik.westrup@gmail.com>
Esko Luontola <esko.luontola@gmail.com>
Evan Broder <evan@stripe.com>
Evan Brown <evanbrown@google.com>
Evan Kroske <evankroske@google.com>
Evan Martin <evan.martin@gmail.com>
@ -298,7 +314,9 @@ Florian Uekermann <florian@uekermann-online.de> <f1@uekermann-online.de>
Florian Weimer <fw@deneb.enyo.de>
Florin Patan <florinpatan@gmail.com>
Folke Behrens <folke@google.com>
Ford Hurley <ford.hurley@gmail.com>
Francesc Campoy <campoy@golang.org>
Francisco Claude <fclaude@recoded.cl>
Francisco Souza <franciscossouza@gmail.com>
Frederick Kelly Mayle III <frederickmayle@gmail.com>
Fredrik Enestad <fredrik.enestad@soundtrackyourbrand.com>
@ -308,6 +326,7 @@ Gaal Yahas <gaal@google.com>
Gabriel Aszalos <gabriel.aszalos@gmail.com>
Garrick Evans <garrick@google.com>
Gary Burd <gary@beagledreams.com> <gary.burd@gmail.com>
Gaurish Sharma <contact@gaurishsharma.com>
Gautham Thambidorai <gautham.dorai@gmail.com>
Geert-Johan Riemer <gjr19912@gmail.com>
Georg Reinke <guelfey@gmail.com>
@ -342,11 +361,14 @@ Henrik Edwards <henrik.edwards@gmail.com>
Herbert Georg Fischer <herbert.fischer@gmail.com>
Hong Ruiqi <hongruiqi@gmail.com>
Hossein Sheikh Attar <hattar@google.com>
Hsin-Ho Yeh <yhh92u@gmail.com>
Hu Keping <hukeping@huawei.com>
Hyang-Ah Hana Kim <hakim@google.com> <hyangah@gmail.com>
Ian Gudger <ian@loosescre.ws>
Ian Lance Taylor <iant@golang.org>
Icarus Sparry <golang@icarus.freeuk.com>
Igor Dolzhikov <bluesriverz@gmail.com>
Ilya Tocar <ilya.tocar@intel.com>
INADA Naoki <songofacandy@gmail.com>
Ingo Krabbe <ikrabbe.ask@gmail.com>
Ingo Oeser <nightlyone@googlemail.com> <nightlyone@gmail.com>
@ -416,6 +438,7 @@ John DeNero <denero@google.com>
John Dethridge <jcd@golang.org>
John Graham-Cumming <jgc@jgc.org> <jgrahamc@gmail.com>
John Howard Palevich <jack.palevich@gmail.com>
John Jenkins <twodopeshaggy@gmail.com>
John Newlin <jnewlin@google.com>
John Potocny <johnp@vividcortex.com>
John Shahid <jvshahid@gmail.com>
@ -464,9 +487,13 @@ Ken Friedenbach <kenliz@cruzio.com>
Ken Rockot <ken@oz.gs> <ken.rockot@gmail.com>
Ken Sedgwick <ken@bonsai.com>
Ken Thompson <ken@golang.org>
Kenny Grant <kennygrant@gmail.com>
Kevin Ballard <kevin@sb.org>
Kevin Klues <klueska@gmail.com> <klueska@google.com>
Kevin Malachowski <chowski@google.com>
Kim Shrier <kshrier@racktopsystems.com>
Kirklin McDonald <kirklin.mcdonald@gmail.com>
Klaus Post <klauspost@gmail.com>
Konstantin Shaposhnikov <k.shaposhnikov@gmail.com>
Kristopher Watts <traetox@gmail.com>
Kun Li <likunarmstrong@gmail.com>
@ -478,6 +505,7 @@ Lai Jiangshan <eag0628@gmail.com>
Larry Hosken <lahosken@golang.org>
Larz Conwell <larzconwell@gmail.com>
Lee Packham <lpackham@gmail.com>
Lewin Bormann <lewin.bormann@gmail.com>
Lloyd Dewolf <foolswisdom@gmail.com>
Lorenzo Stoakes <lstoakes@gmail.com>
Louis Kruger <louisk@google.com>
@ -493,6 +521,7 @@ Lynn Boger <laboger@linux.vnet.ibm.com>
Mal Curtis <mal@mal.co.nz>
Manoj Dayaram <platform-dev@moovweb.com> <manoj.dayaram@moovweb.com>
Manu Garg <manugarg@google.com>
Manu S Ajith <neo@codingarena.in>
Manuel Mendez <mmendez534@gmail.com>
Marc Weistroff <marc@weistroff.net>
Marcel van Lohuizen <mpvl@golang.org>
@ -519,6 +548,7 @@ Mats Lidell <mats.lidell@cag.se> <mats.lidell@gmail.com>
Matt Aimonetti <mattaimonetti@gmail.com>
Matt Bostock <matt@mattbostock.com>
Matt Brown <mdbrown@google.com>
Matt Drollette <matt@drollette.com>
Matt Jibson <matt.jibson@gmail.com>
Matt Joiner <anacrolix@gmail.com>
Matt Jones <mrjones@google.com>
@ -535,6 +565,8 @@ Maxim Khitrov <max@mxcrypt.com>
Maxim Pimenov <mpimenov@google.com>
Maxim Ushakov <ushakov@google.com>
Meir Fischer <meirfischer@gmail.com>
Meng Zhuo <mengzhuo1203@gmail.com>
Mhd Sulhan <m.shulhan@gmail.com>
Micah Stetson <micah.stetson@gmail.com>
Michael Chaten <mchaten@gmail.com>
Michael Elkins <michael.elkins@gmail.com>
@ -573,12 +605,15 @@ Miki Tebeka <miki.tebeka@gmail.com>
Mikio Hara <mikioh.mikioh@gmail.com>
Mikkel Krautz <mikkel@krautz.dk> <krautz@gmail.com>
Miquel Sabaté Solà <mikisabate@gmail.com>
Mohit Agarwal <mohit@sdf.org>
Moriyoshi Koizumi <mozo@mozo.jp>
Môshe van der Sterre <moshevds@gmail.com>
Mrunal Patel <mrunalp@gmail.com>
Nan Deng <monnand@gmail.com>
Nathan John Youngman <nj@nathany.com>
Nathan Otterness <otternes@cs.unc.edu>
Nathan P Finch <nate.finch@gmail.com>
Nathan VanBenschoten <nvanbenschoten@gmail.com>
Nathan Youngman <git@nathany.com>
Nathan(yinian) Hu <nathanhu@google.com>
Neelesh Chandola <neelesh.c98@gmail.com>
@ -619,13 +654,16 @@ Paul A Querna <paul.querna@gmail.com>
Paul Borman <borman@google.com>
Paul Chang <paulchang@google.com>
Paul Hammond <paul@paulhammond.org>
Paul Hankin <paulhankin@google.com>
Paul Lalonde <paul.a.lalonde@gmail.com>
Paul Marks <pmarks@google.com>
Paul Meyer <paul.meyer@microsoft.com>
Paul Nasrat <pnasrat@google.com>
Paul Rosania <paul.rosania@gmail.com>
Paul Sbarra <Sbarra.Paul@gmail.com>
Paul Smith <paulsmith@pobox.com> <paulsmith@gmail.com>
Paul van Brouwershaven <paul@vanbrouwershaven.com>
Pavel Paulau <pavel.paulau@gmail.com>
Pavel Zinovkin <pavel.zinovkin@gmail.com>
Pawel Knap <pawelknap88@gmail.com>
Pawel Szczur <filemon@google.com>
@ -658,6 +696,7 @@ Quoc-Viet Nguyen <afelion@gmail.com>
Rahul Chaudhry <rahulchaudhry@chromium.org>
Raif S. Naffah <go@naffah-raif.name>
Rajat Goel <rajat.goel2010@gmail.com>
Ralph Corderoy <ralph@inputplus.co.uk>
Raph Levien <raph@google.com>
Raul Silvera <rsilvera@google.com>
Reinaldo de Souza Jr <juniorz@gmail.com>
@ -670,6 +709,7 @@ Rick Arnold <rickarnoldjr@gmail.com>
Rick Hudson <rlh@golang.org>
Risto Jaakko Saarelma <rsaarelm@gmail.com>
Rob Earhart <earhart@google.com>
Rob Norman <rob.norman@infinitycloud.com>
Rob Pike <r@golang.org>
Robert Daniel Kortschak <dan.kortschak@adelaide.edu.au>
Robert Dinu <r@varp.se>
@ -717,6 +757,7 @@ Sébastien Paolacci <sebastien.paolacci@gmail.com>
Sergei Skorobogatov <skorobo@rambler.ru>
Sergey 'SnakE' Gromov <snake.scaly@gmail.com>
Sergio Luis O. B. Correia <sergio@correia.cc>
Seth Hoenig <seth.a.hoenig@gmail.com>
Shane Hansen <shanemhansen@gmail.com>
Shaozhen Ding <dsz0111@gmail.com>
Shawn Ledbetter <sledbetter@google.com>
@ -728,6 +769,7 @@ Shun Fan <sfan@google.com>
Silvan Jegen <s.jegen@gmail.com>
Simon Whitehead <chemnova@gmail.com>
Sokolov Yura <funny.falcon@gmail.com>
Spencer Nelson <s@spenczar.com>
Spring Mc <heresy.mc@gmail.com>
Srdjan Petrovic <spetrovic@google.com>
StalkR <stalkr@stalkr.net>
@ -738,6 +780,8 @@ Stephen Ma <stephenm@golang.org>
Stephen McQuay <stephen@mcquay.me>
Stephen Weinberg <stephen@q5comm.com>
Steve McCoy <mccoyst@gmail.com>
Steve Newman <snewman@google.com>
Steve Phillips <elimisteve@gmail.com>
Steve Streeting <steve@stevestreeting.com>
Steven Elliot Harris <seharris@gmail.com>
Steven Hartland <steven.hartland@multiplay.co.uk>
@ -758,6 +802,7 @@ Thomas Alan Copeland <talan.copeland@gmail.com>
Thomas Desrosiers <thomasdesr@gmail.com>
Thomas Habets <habets@google.com>
Thomas Kappler <tkappler@gmail.com>
Thorben Krueger <thorben.krueger@gmail.com>
Tim Cooijmans <timcooijmans@gmail.com>
Tim Hockin <thockin@google.com>
Timo Savola <timo.savola@gmail.com>
@ -770,6 +815,7 @@ Tom Linford <tomlinford@gmail.com>
Tom Szymanski <tgs@google.com>
Tommy Schaefer <tommy.schaefer@teecom.com>
Tor Andersson <tor.andersson@gmail.com>
Tormod Erevik Lea <tormodlea@gmail.com>
Totoro W <tw19881113@gmail.com>
Travis Cline <travis.cline@gmail.com>
Trevor Strohman <trevor.strohman@gmail.com>
@ -793,6 +839,7 @@ Vlad Krasnov <vlad@cloudflare.com>
Vladimir Nikishenko <vova616@gmail.com>
Volker Dobler <dr.volker.dobler@gmail.com>
Wei Guangjing <vcc.163@gmail.com>
Will Chan <willchan@google.com>
Will Norris <willnorris@google.com>
Willem van der Schyff <willemvds@gmail.com>
William Chan <willchan@chromium.org>
@ -802,12 +849,15 @@ Xia Bin <snyh@snyh.org>
Xing Xing <mikespook@gmail.com>
Yan Zou <yzou@google.com>
Yann Kerhervé <yann.kerherve@gmail.com>
Yao Zhang <lunaria21@gmail.com>
Yasuharu Goto <matope.ono@gmail.com>
Yasuhiro Matsumoto <mattn.jp@gmail.com>
Yesudeep Mangalapilly <yesudeep@google.com>
Yissakhar Z. Beck <yissakhar.beck@gmail.com>
Yo-An Lin <yoanlin93@gmail.com>
Yongjian Xu <i3dmaster@gmail.com>
Yoshiyuki Kanno <nekotaroh@gmail.com> <yoshiyuki.kanno@stoic.co.jp>
Yuki Yugui Sonoda <yugui@google.com>
Yusuke Kagiwada <block.rxckin.beats@gmail.com>
Yuusei Kuwana <kuwana@kumama.org>
Yuval Pavel Zholkover <paulzhol@gmail.com>

View File

@ -2,6 +2,105 @@ pkg bufio, method (*Scanner) Buffer([]uint8, int)
pkg bufio, var ErrFinalToken error
pkg debug/dwarf, const ClassUnknown = 0
pkg debug/dwarf, const ClassUnknown Class
pkg debug/elf, const R_MIPS_16 = 1
pkg debug/elf, const R_MIPS_16 R_MIPS
pkg debug/elf, const R_MIPS_26 = 4
pkg debug/elf, const R_MIPS_26 R_MIPS
pkg debug/elf, const R_MIPS_32 = 2
pkg debug/elf, const R_MIPS_32 R_MIPS
pkg debug/elf, const R_MIPS_64 = 18
pkg debug/elf, const R_MIPS_64 R_MIPS
pkg debug/elf, const R_MIPS_ADD_IMMEDIATE = 34
pkg debug/elf, const R_MIPS_ADD_IMMEDIATE R_MIPS
pkg debug/elf, const R_MIPS_CALL16 = 11
pkg debug/elf, const R_MIPS_CALL16 R_MIPS
pkg debug/elf, const R_MIPS_CALL_HI16 = 30
pkg debug/elf, const R_MIPS_CALL_HI16 R_MIPS
pkg debug/elf, const R_MIPS_CALL_LO16 = 31
pkg debug/elf, const R_MIPS_CALL_LO16 R_MIPS
pkg debug/elf, const R_MIPS_DELETE = 27
pkg debug/elf, const R_MIPS_DELETE R_MIPS
pkg debug/elf, const R_MIPS_GOT16 = 9
pkg debug/elf, const R_MIPS_GOT16 R_MIPS
pkg debug/elf, const R_MIPS_GOT_DISP = 19
pkg debug/elf, const R_MIPS_GOT_DISP R_MIPS
pkg debug/elf, const R_MIPS_GOT_HI16 = 22
pkg debug/elf, const R_MIPS_GOT_HI16 R_MIPS
pkg debug/elf, const R_MIPS_GOT_LO16 = 23
pkg debug/elf, const R_MIPS_GOT_LO16 R_MIPS
pkg debug/elf, const R_MIPS_GOT_OFST = 21
pkg debug/elf, const R_MIPS_GOT_OFST R_MIPS
pkg debug/elf, const R_MIPS_GOT_PAGE = 20
pkg debug/elf, const R_MIPS_GOT_PAGE R_MIPS
pkg debug/elf, const R_MIPS_GPREL16 = 7
pkg debug/elf, const R_MIPS_GPREL16 R_MIPS
pkg debug/elf, const R_MIPS_GPREL32 = 12
pkg debug/elf, const R_MIPS_GPREL32 R_MIPS
pkg debug/elf, const R_MIPS_HI16 = 5
pkg debug/elf, const R_MIPS_HI16 R_MIPS
pkg debug/elf, const R_MIPS_HIGHER = 28
pkg debug/elf, const R_MIPS_HIGHER R_MIPS
pkg debug/elf, const R_MIPS_HIGHEST = 29
pkg debug/elf, const R_MIPS_HIGHEST R_MIPS
pkg debug/elf, const R_MIPS_INSERT_A = 25
pkg debug/elf, const R_MIPS_INSERT_A R_MIPS
pkg debug/elf, const R_MIPS_INSERT_B = 26
pkg debug/elf, const R_MIPS_INSERT_B R_MIPS
pkg debug/elf, const R_MIPS_JALR = 37
pkg debug/elf, const R_MIPS_JALR R_MIPS
pkg debug/elf, const R_MIPS_LITERAL = 8
pkg debug/elf, const R_MIPS_LITERAL R_MIPS
pkg debug/elf, const R_MIPS_LO16 = 6
pkg debug/elf, const R_MIPS_LO16 R_MIPS
pkg debug/elf, const R_MIPS_NONE = 0
pkg debug/elf, const R_MIPS_NONE R_MIPS
pkg debug/elf, const R_MIPS_PC16 = 10
pkg debug/elf, const R_MIPS_PC16 R_MIPS
pkg debug/elf, const R_MIPS_PJUMP = 35
pkg debug/elf, const R_MIPS_PJUMP R_MIPS
pkg debug/elf, const R_MIPS_REL16 = 33
pkg debug/elf, const R_MIPS_REL16 R_MIPS
pkg debug/elf, const R_MIPS_REL32 = 3
pkg debug/elf, const R_MIPS_REL32 R_MIPS
pkg debug/elf, const R_MIPS_RELGOT = 36
pkg debug/elf, const R_MIPS_RELGOT R_MIPS
pkg debug/elf, const R_MIPS_SCN_DISP = 32
pkg debug/elf, const R_MIPS_SCN_DISP R_MIPS
pkg debug/elf, const R_MIPS_SHIFT5 = 16
pkg debug/elf, const R_MIPS_SHIFT5 R_MIPS
pkg debug/elf, const R_MIPS_SHIFT6 = 17
pkg debug/elf, const R_MIPS_SHIFT6 R_MIPS
pkg debug/elf, const R_MIPS_SUB = 24
pkg debug/elf, const R_MIPS_SUB R_MIPS
pkg debug/elf, const R_MIPS_TLS_DTPMOD32 = 38
pkg debug/elf, const R_MIPS_TLS_DTPMOD32 R_MIPS
pkg debug/elf, const R_MIPS_TLS_DTPMOD64 = 40
pkg debug/elf, const R_MIPS_TLS_DTPMOD64 R_MIPS
pkg debug/elf, const R_MIPS_TLS_DTPREL32 = 39
pkg debug/elf, const R_MIPS_TLS_DTPREL32 R_MIPS
pkg debug/elf, const R_MIPS_TLS_DTPREL64 = 41
pkg debug/elf, const R_MIPS_TLS_DTPREL64 R_MIPS
pkg debug/elf, const R_MIPS_TLS_DTPREL_HI16 = 44
pkg debug/elf, const R_MIPS_TLS_DTPREL_HI16 R_MIPS
pkg debug/elf, const R_MIPS_TLS_DTPREL_LO16 = 45
pkg debug/elf, const R_MIPS_TLS_DTPREL_LO16 R_MIPS
pkg debug/elf, const R_MIPS_TLS_GD = 42
pkg debug/elf, const R_MIPS_TLS_GD R_MIPS
pkg debug/elf, const R_MIPS_TLS_GOTTPREL = 46
pkg debug/elf, const R_MIPS_TLS_GOTTPREL R_MIPS
pkg debug/elf, const R_MIPS_TLS_LDM = 43
pkg debug/elf, const R_MIPS_TLS_LDM R_MIPS
pkg debug/elf, const R_MIPS_TLS_TPREL32 = 47
pkg debug/elf, const R_MIPS_TLS_TPREL32 R_MIPS
pkg debug/elf, const R_MIPS_TLS_TPREL64 = 48
pkg debug/elf, const R_MIPS_TLS_TPREL64 R_MIPS
pkg debug/elf, const R_MIPS_TLS_TPREL_HI16 = 49
pkg debug/elf, const R_MIPS_TLS_TPREL_HI16 R_MIPS
pkg debug/elf, const R_MIPS_TLS_TPREL_LO16 = 50
pkg debug/elf, const R_MIPS_TLS_TPREL_LO16 R_MIPS
pkg debug/elf, method (R_MIPS) GoString() string
pkg debug/elf, method (R_MIPS) String() string
pkg debug/elf, type R_MIPS int
pkg html/template, func IsTrue(interface{}) (bool, bool)
pkg image, func NewNYCbCrA(Rectangle, YCbCrSubsampleRatio) *NYCbCrA
pkg image, method (*NYCbCrA) AOffset(int, int) int
@ -38,9 +137,17 @@ pkg net/http, const StatusRequestHeaderFieldsTooLarge = 431
pkg net/http, const StatusRequestHeaderFieldsTooLarge ideal-int
pkg net/http, const StatusTooManyRequests = 429
pkg net/http, const StatusTooManyRequests ideal-int
pkg net/http, type Transport struct, ExpectContinueTimeout time.Duration
pkg net/http, type Transport struct, TLSNextProto map[string]func(string, *tls.Conn) RoundTripper
pkg net/http, var ErrSkipAltProtocol error
pkg net/http/httptest, method (*ResponseRecorder) WriteString(string) (int, error)
pkg net/http/httputil, type BufferPool interface { Get, Put }
pkg net/http/httputil, type BufferPool interface, Get() []uint8
pkg net/http/httputil, type BufferPool interface, Put([]uint8)
pkg net/http/httputil, type ReverseProxy struct, BufferPool BufferPool
pkg net/url, method (*Error) Temporary() bool
pkg net/url, method (*Error) Timeout() bool
pkg os/exec, type ExitError struct, Stderr []uint8
pkg strconv, func AppendQuoteRuneToGraphic([]uint8, int32) []uint8
pkg strconv, func AppendQuoteToGraphic([]uint8, string) []uint8
pkg strconv, func IsGraphic(int32) bool
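A rough illustration of a few of the additions listed above (an editor's sketch, not code from this commit): Scanner.Buffer lifts the fixed 64 KB token limit, strconv.IsGraphic classifies runes, and the new debug/elf R_MIPS type gets a String method.

package main

import (
	"bufio"
	"debug/elf"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// A single 1 MiB token; the default 64 KB limit would stop with ErrTooLong.
	s := bufio.NewScanner(strings.NewReader(strings.Repeat("x", 1<<20)))
	s.Buffer(make([]byte, 0, 2<<20), 2<<20) // new Scanner.Buffer method
	for s.Scan() {
		fmt.Println(len(s.Text())) // 1048576
	}
	fmt.Println(strconv.IsGraphic('♬')) // true
	fmt.Println(elf.R_MIPS_26)          // "R_MIPS_26", via the new String method
}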

View File

@ -1,19 +1,35 @@
Tools:
cmd/dist: use clang on FreeBSD (https://golang.org/cl/16635)
cmd/go: vendoring enabled by default (https://golang.org/cl/13967/)
cmd/go: flags for tests must precede package name if present; also makes it easier to pass flags to test binaries (https://golang.org/cl/14826)
cmd/go: add -msan option (https://golang.org/cl/16169)
cmd/go: use shallow clones for new git checkouts (https://golang.org/cl/16360)
cmd/compile: add -msan option (https://golang.org/cl/16160)
cmd/link: add -msan option (https://golang.org/cl/16161)
Ports:
Add new experimental ports for linux/mips64 and linux/mips64le: no cgo, external linking or disasm yet (https://golang.org/cl/14460 and others)
NaCl is no longer restricted to pepper_41 (https://golang.org/cl/13958/)
Reflect change:
cmd/compile/internal/gc: make embedded unexported structs RO (https://golang.org/cl/14085)
encoding/json: check for exported fields in embedded structs (https://golang.org/cl/14011)
encoding/xml: check for exported fields in embedded structs (https://golang.org/cl/14012)
reflect: adjust access to unexported embedded structs (https://golang.org/cl/14010)
API additions and behavior changes:
bufio: add Scanner.Buffer (https://golang.org/cl/14599/)
bufio: add ErrFinalToken as a sentinel value for Scan's split functions (https://golang.org/cl/14924)
crypto/aes: dedicated asm version of AES-GCM (https://golang.org/cl/10484)
fmt: allow any integer type as an argument to the * operator (https://golang.org/cl/14491/)
image: add NYCbCrA types (https://golang.org/cl/15671)
math/rand: add Read (https://golang.org/cl/14522)
net/http: HTTP/2.0 support (many CLs)
net/url: make *url.Error implement net.Error (https://golang.org/cl/15672)
runtime: only one goroutine in traceback (https://golang.org/cl/16512) maybe
strconv: QuoteToGraphic (https://golang.org/cl/14184/)
text/template: ExecError (https://golang.org/cl/13957/)
text/template: trimming spaces (https://golang.org/cl/14391/)
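One of the smaller behavior changes above, text/template's new space-trimming markers, in a self-contained sketch (editor's illustration, not code from this commit):

package main

import (
	"os"
	"text/template"
)

func main() {
	// "-}}" trims the space after the action; "{{-" trims the space before it.
	t := template.Must(template.New("trim").Parse("{{23 -}} < {{- 45}}\n"))
	// Prints "23<45".
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}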

View File

@ -1,6 +1,6 @@
<!--{
"Title": "The Go Programming Language Specification",
"Subtitle": "Version of September 24, 2015",
"Subtitle": "Version of October 20, 2015",
"Path": "/ref/spec"
}-->
@ -558,7 +558,9 @@ and are discussed in that section.
</p>
<p>
Numeric constants represent values of arbitrary precision and do not overflow.
Numeric constants represent exact values of arbitrary precision and do not overflow.
Consequently, there are no constants denoting the IEEE-754 negative zero, infinity,
and not-a-number values.
</p>
<p>
@ -593,16 +595,6 @@ respectively, depending on whether it is a boolean, rune, integer, floating-poin
complex, or string constant.
</p>
<p>
There are no constants denoting the IEEE-754 infinity and not-a-number values,
but the <a href="/pkg/math/"><code>math</code> package</a>'s
<a href="/pkg/math/#Inf">Inf</a>,
<a href="/pkg/math/#NaN">NaN</a>,
<a href="/pkg/math/#IsInf">IsInf</a>, and
<a href="/pkg/math/#IsNaN">IsNaN</a>
functions return and test for those values at run time.
</p>
<p>
Implementation restriction: Although numeric constants have arbitrary
precision in the language, a compiler may implement them using an
@ -3646,12 +3638,12 @@ is also allowed and follows from the general rules above.
</p>
<pre>
const c = 3 &lt; 4 // c is the untyped bool constant true
const c = 3 &lt; 4 // c is the untyped boolean constant true
type MyBool bool
var x, y int
var (
// The result of a comparison is an untyped bool.
// The result of a comparison is an untyped boolean.
// The usual assignment rules apply.
b3 = x == y // b3 has type bool
b4 bool = x == y // b4 has type bool
@ -3795,7 +3787,8 @@ type <code>T</code> in any of these cases:
<code>T</code> is a floating-point type,
and <code>x</code> is representable by a value
of type <code>T</code> after rounding using
IEEE 754 round-to-even rules.
IEEE 754 round-to-even rules, but with an IEEE <code>-0.0</code>
further rounded to an unsigned <code>0.0</code>.
The constant <code>T(x)</code> is the rounded value.
</li>
<li>
@ -3815,6 +3808,7 @@ uint(iota) // iota value of type uint
float32(2.718281828) // 2.718281828 of type float32
complex128(1) // 1.0 + 0.0i of type complex128
float32(0.49999999) // 0.5 of type float32
float64(-1e-1000) // 0.0 of type float64
string('x') // "x" of type string
string(0x266c) // "♬" of type string
MyString("foo" + "bar") // "foobar" of type MyString
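The constant rules described above, in a small compilable form (editor's sketch, not part of the spec diff):

package main

import "fmt"

type MyBool bool

const c = 3 < 4 // an untyped boolean constant

func main() {
	var b MyBool = c // untyped constants adapt to the type they are assigned to
	fmt.Println(b)                   // true
	fmt.Println(float32(0.49999999)) // 0.5, after IEEE round-to-even
	fmt.Println(float64(-1e-1000))   // 0, an unsigned zero per the rule above
}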

View File

@ -71,6 +71,12 @@ architectures.
<dd>
Supports Linux binaries. New in 1.5 and not as well exercised as other ports.
</dd>
<dt>
<code>mips64, mips64le</code> (64-bit MIPS big- and little-endian)
</dt>
<dd>
Supports Linux binaries. New in 1.6 and not as well exercised as other ports.
</dd>
</dl>
<p>
@ -112,7 +118,7 @@ location).
<p>
If you want to install Go 1.5 on a system that is not supported by Go 1.4 (such
as <code>linux/ppc64</code>) you can either use
as <code>linux/ppc64</code> and <code>linux/mips64le</code>) you can either use
<a href="/src/bootstrap.bash">bootstrap.bash</a> on a system that can bootstrap Go
1.5 normally, or bootstrap with gccgo 5.
</p>
@ -405,7 +411,8 @@ Choices for <code>$GOOS</code> are
Choices for <code>$GOARCH</code> are
<code>amd64</code> (64-bit x86, the most mature port),
<code>386</code> (32-bit x86), <code>arm</code> (32-bit ARM), <code>arm64</code> (64-bit ARM),
<code>ppc64le</code> (PowerPC 64-bit, little-endian), and <code>ppc64</code> (PowerPC 64-bit, big-endian).
<code>ppc64le</code> (PowerPC 64-bit, little-endian), <code>ppc64</code> (PowerPC 64-bit, big-endian),
<code>mips64le</code> (MIPS 64-bit, little-endian), and <code>mips64</code> (MIPS 64-bit, big-endian).
The valid combinations of <code>$GOOS</code> and <code>$GOARCH</code> are:
<table cellpadding="0">
<tr>
@ -454,6 +461,12 @@ The valid combinations of <code>$GOOS</code> and <code>$GOARCH</code> are:
<td></td><td><code>linux</code></td> <td><code>ppc64le</code></td>
</tr>
<tr>
<td></td><td><code>linux</code></td> <td><code>mips64</code></td>
</tr>
<tr>
<td></td><td><code>linux</code></td> <td><code>mips64le</code></td>
</tr>
<tr>
<td></td><td><code>netbsd</code></td> <td><code>386</code></td>
</tr>
<tr>

misc/cgo/errors/ptr.go (new file, 412 lines)

@ -0,0 +1,412 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Tests that cgo detects invalid pointer passing at runtime.
package main
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
)
// ptrTest is the tests without the boilerplate.
type ptrTest struct {
name string // for reporting
c string // the cgo comment
imports []string // a list of imports
support string // supporting functions
body string // the body of the main function
fail bool // whether the test should fail
expensive bool // whether the test requires the expensive check
}
var ptrTests = []ptrTest{
{
// Passing a pointer to a struct that contains a Go pointer.
name: "ptr1",
c: `typedef struct s { int *p; } s; void f(s *ps) {}`,
body: `C.f(&C.s{new(C.int)})`,
fail: true,
},
{
// Passing a pointer to a struct that contains a Go pointer.
name: "ptr2",
c: `typedef struct s { int *p; } s; void f(s *ps) {}`,
body: `p := &C.s{new(C.int)}; C.f(p)`,
fail: true,
},
{
// Passing a pointer to an int field of a Go struct
// that (irrelevantly) contains a Go pointer.
name: "ok1",
c: `struct s { int i; int *p; }; void f(int *p) {}`,
body: `p := &C.struct_s{i: 0, p: new(C.int)}; C.f(&p.i)`,
fail: false,
},
{
// Passing a pointer to a pointer field of a Go struct.
name: "ptr-field",
c: `struct s { int i; int *p; }; void f(int **p) {}`,
body: `p := &C.struct_s{i: 0, p: new(C.int)}; C.f(&p.p)`,
fail: true,
},
{
// Passing a pointer to a pointer field of a Go
// struct, where the field does not contain a Go
// pointer, but another field (irrelevantly) does.
name: "ptr-field-ok",
c: `struct s { int *p1; int *p2; }; void f(int **p) {}`,
body: `p := &C.struct_s{p1: nil, p2: new(C.int)}; C.f(&p.p1)`,
fail: false,
},
{
// Passing the address of a slice with no Go pointers.
name: "slice-ok-1",
c: `void f(void **p) {}`,
imports: []string{"unsafe"},
body: `s := []unsafe.Pointer{nil}; C.f(&s[0])`,
fail: false,
},
{
// Passing the address of a slice with a Go pointer.
name: "slice-ptr-1",
c: `void f(void **p) {}`,
imports: []string{"unsafe"},
body: `i := 0; s := []unsafe.Pointer{unsafe.Pointer(&i)}; C.f(&s[0])`,
fail: true,
},
{
// Passing the address of a slice with a Go pointer,
// where we are passing the address of an element that
// is not a Go pointer.
name: "slice-ptr-2",
c: `void f(void **p) {}`,
imports: []string{"unsafe"},
body: `i := 0; s := []unsafe.Pointer{nil, unsafe.Pointer(&i)}; C.f(&s[0])`,
fail: true,
},
{
// Passing the address of a slice that is an element
// in a struct only looks at the slice.
name: "slice-ok-2",
c: `void f(void **p) {}`,
imports: []string{"unsafe"},
support: `type S struct { p *int; s []unsafe.Pointer }`,
body: `i := 0; p := &S{p:&i, s:[]unsafe.Pointer{nil}}; C.f(&p.s[0])`,
fail: false,
},
{
// Passing the address of a static variable with no
// pointers doesn't matter.
name: "varok",
c: `void f(char** parg) {}`,
support: `var hello = [...]C.char{'h', 'e', 'l', 'l', 'o'}`,
body: `parg := [1]*C.char{&hello[0]}; C.f(&parg[0])`,
fail: false,
},
{
// Passing the address of a static variable with
// pointers does matter.
name: "var",
c: `void f(char*** parg) {}`,
support: `var hello = [...]*C.char{new(C.char)}`,
body: `parg := [1]**C.char{&hello[0]}; C.f(&parg[0])`,
fail: true,
},
/*
TODO(khr): reenable when write barriers are fixed.
{
// Storing a Go pointer into C memory should fail.
name: "barrier",
c: `#include <stdlib.h>
char **f1() { return malloc(sizeof(char*)); }
void f2(char **p) {}`,
body: `p := C.f1(); *p = new(C.char); C.f2(p)`,
fail: true,
expensive: true,
},
{
// Storing a Go pointer into C memory by assigning a
// large value should fail.
name: "barrier-struct",
c: `#include <stdlib.h>
struct s { char *a[10]; };
struct s *f1() { return malloc(sizeof(struct s)); }
void f2(struct s *p) {}`,
body: `p := C.f1(); p.a = [10]*C.char{new(C.char)}; C.f2(p)`,
fail: true,
expensive: true,
},
{
// Storing a Go pointer into C memory using a slice
// copy should fail.
name: "barrier-slice",
c: `#include <stdlib.h>
struct s { char *a[10]; };
struct s *f1() { return malloc(sizeof(struct s)); }
void f2(struct s *p) {}`,
body: `p := C.f1(); copy(p.a[:], []*C.char{new(C.char)}); C.f2(p)`,
fail: true,
expensive: true,
},
{
// A very large value uses a GC program, which is a
// different code path.
name: "barrier-gcprog-array",
c: `#include <stdlib.h>
struct s { char *a[32769]; };
struct s *f1() { return malloc(sizeof(struct s)); }
void f2(struct s *p) {}`,
body: `p := C.f1(); p.a = [32769]*C.char{new(C.char)}; C.f2(p)`,
fail: true,
expensive: true,
},
{
// Similar case, with a source on the heap.
name: "barrier-gcprog-array-heap",
c: `#include <stdlib.h>
struct s { char *a[32769]; };
struct s *f1() { return malloc(sizeof(struct s)); }
void f2(struct s *p) {}
void f3(void *p) {}`,
imports: []string{"unsafe"},
body: `p := C.f1(); n := &[32769]*C.char{new(C.char)}; p.a = *n; C.f2(p); n[0] = nil; C.f3(unsafe.Pointer(n))`,
fail: true,
expensive: true,
},
{
// A GC program with a struct.
name: "barrier-gcprog-struct",
c: `#include <stdlib.h>
struct s { char *a[32769]; };
struct s2 { struct s f; };
struct s2 *f1() { return malloc(sizeof(struct s2)); }
void f2(struct s2 *p) {}`,
body: `p := C.f1(); p.f = C.struct_s{[32769]*C.char{new(C.char)}}; C.f2(p)`,
fail: true,
expensive: true,
},
{
// Similar case, with a source on the heap.
name: "barrier-gcprog-struct-heap",
c: `#include <stdlib.h>
struct s { char *a[32769]; };
struct s2 { struct s f; };
struct s2 *f1() { return malloc(sizeof(struct s2)); }
void f2(struct s2 *p) {}
void f3(void *p) {}`,
imports: []string{"unsafe"},
body: `p := C.f1(); n := &C.struct_s{[32769]*C.char{new(C.char)}}; p.f = *n; C.f2(p); n.a[0] = nil; C.f3(unsafe.Pointer(n))`,
fail: true,
expensive: true,
},
*/
}
func main() {
os.Exit(doTests())
}
func doTests() int {
dir, err := ioutil.TempDir("", "cgoerrors")
if err != nil {
fmt.Fprintln(os.Stderr, err)
return 2
}
defer os.RemoveAll(dir)
workers := runtime.NumCPU() + 1
var wg sync.WaitGroup
c := make(chan int)
errs := make(chan int)
for i := 0; i < workers; i++ {
wg.Add(1)
go func() {
worker(dir, c, errs)
wg.Done()
}()
}
for i := range ptrTests {
c <- i
}
close(c)
go func() {
wg.Wait()
close(errs)
}()
tot := 0
for e := range errs {
tot += e
}
return tot
}
func worker(dir string, c, errs chan int) {
e := 0
for i := range c {
if !doOne(dir, i) {
e++
}
}
if e > 0 {
errs <- e
}
}
func doOne(dir string, i int) bool {
t := &ptrTests[i]
name := filepath.Join(dir, fmt.Sprintf("t%d.go", i))
f, err := os.Create(name)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return false
}
b := bufio.NewWriter(f)
fmt.Fprintln(b, `package main`)
fmt.Fprintln(b)
fmt.Fprintln(b, `/*`)
fmt.Fprintln(b, t.c)
fmt.Fprintln(b, `*/`)
fmt.Fprintln(b, `import "C"`)
fmt.Fprintln(b)
for _, imp := range t.imports {
fmt.Fprintln(b, `import "`+imp+`"`)
}
if len(t.imports) > 0 {
fmt.Fprintln(b)
}
if len(t.support) > 0 {
fmt.Fprintln(b, t.support)
fmt.Fprintln(b)
}
fmt.Fprintln(b, `func main() {`)
fmt.Fprintln(b, t.body)
fmt.Fprintln(b, `}`)
if err := b.Flush(); err != nil {
fmt.Fprintf(os.Stderr, "flushing %s: %v\n", name, err)
return false
}
if err := f.Close(); err != nil {
fmt.Fprintln(os.Stderr, "closing %s: %v\n", name, err)
return false
}
ok := true
cmd := exec.Command("go", "run", name)
cmd.Dir = dir
if t.expensive {
cmd.Env = cgocheckEnv("1")
buf, err := cmd.CombinedOutput()
if err != nil {
var errbuf bytes.Buffer
if t.fail {
fmt.Fprintf(&errbuf, "test %s marked expensive but failed when not expensive: %v\n", t.name, err)
} else {
fmt.Fprintf(&errbuf, "test %s failed unexpectedly with GODEBUG=cgocheck=1: %v\n", t.name, err)
}
reportTestOutput(&errbuf, t.name, buf)
os.Stderr.Write(errbuf.Bytes())
ok = false
}
cmd = exec.Command("go", "run", name)
cmd.Dir = dir
}
if t.expensive {
cmd.Env = cgocheckEnv("2")
}
buf, err := cmd.CombinedOutput()
if t.fail {
if err == nil {
var errbuf bytes.Buffer
fmt.Fprintf(&errbuf, "test %s did not fail as expected\n", t.name)
reportTestOutput(&errbuf, t.name, buf)
os.Stderr.Write(errbuf.Bytes())
ok = false
} else if !bytes.Contains(buf, []byte("Go pointer")) {
var errbuf bytes.Buffer
fmt.Fprintf(&errbuf, "test %s output does not contain expected error (failed with %v)\n", t.name, err)
reportTestOutput(&errbuf, t.name, buf)
os.Stderr.Write(errbuf.Bytes())
ok = false
}
} else {
if err != nil {
var errbuf bytes.Buffer
fmt.Fprintf(&errbuf, "test %s failed unexpectedly: %v\n", t.name, err)
reportTestOutput(&errbuf, t.name, buf)
os.Stderr.Write(errbuf.Bytes())
ok = false
}
if !t.expensive && ok {
// Make sure it passes with the expensive checks.
cmd := exec.Command("go", "run", name)
cmd.Dir = dir
cmd.Env = cgocheckEnv("2")
buf, err := cmd.CombinedOutput()
if err != nil {
var errbuf bytes.Buffer
fmt.Fprintf(&errbuf, "test %s failed unexpectedly with expensive checks: %v\n", t.name, err)
reportTestOutput(&errbuf, t.name, buf)
os.Stderr.Write(errbuf.Bytes())
ok = false
}
}
}
if t.fail && ok {
cmd = exec.Command("go", "run", name)
cmd.Dir = dir
cmd.Env = cgocheckEnv("0")
buf, err := cmd.CombinedOutput()
if err != nil {
var errbuf bytes.Buffer
fmt.Fprintf(&errbuf, "test %s failed unexpectedly with GODEBUG=cgocheck=0: %v\n", t.name, err)
reportTestOutput(&errbuf, t.name, buf)
os.Stderr.Write(errbuf.Bytes())
ok = false
}
}
return ok
}
func reportTestOutput(w io.Writer, name string, buf []byte) {
fmt.Fprintf(w, "=== test %s output ===\n", name)
fmt.Fprintf(w, "%s", buf)
fmt.Fprintf(w, "=== end of test %s output ===\n", name)
}
func cgocheckEnv(val string) []string {
env := []string{"GODEBUG=cgocheck=" + val}
for _, e := range os.Environ() {
if !strings.HasPrefix(e, "GODEBUG=") {
env = append(env, e)
}
}
return env
}
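The failures these tests look for come from the runtime's cgo pointer checks, toggled through the GODEBUG=cgocheck setting that cgocheckEnv builds above. A stripped-down, standalone version of the "ptr1" case (editor's sketch) that those checks are expected to reject at run time:

package main

/*
typedef struct s { int *p; } s;
void f(s *ps) {}
*/
import "C"

func main() {
	// Passing a struct that contains a Go pointer into C violates the new
	// pointer-passing rules and should panic while the checks are enabled.
	C.f(&C.s{new(C.int)})
}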

View File

@ -34,5 +34,9 @@ check issue8442.go
check issue11097a.go
check issue11097b.go
if ! go run ptr.go; then
exit 1
fi
rm -rf errs _obj
exit 0

View File

@ -19,20 +19,47 @@ import (
"path"
"runtime"
"strings"
"sync"
"testing"
"unsafe"
)
// Pass a func value from nestedCall to goCallback using an integer token.
var callbackMutex sync.Mutex
var callbackToken int
var callbackFuncs = make(map[int]func())
// nestedCall calls into C, back into Go, and finally to f.
func nestedCall(f func()) {
// NOTE: Depends on representation of f.
// callback(x) calls goCallback(x)
C.callback(*(*unsafe.Pointer)(unsafe.Pointer(&f)))
callbackMutex.Lock()
callbackToken++
i := callbackToken
callbackFuncs[i] = f
callbackMutex.Unlock()
// Pass the address of i because the C function was written to
// take a pointer. We could pass an int if we felt like
// rewriting the C code.
C.callback(unsafe.Pointer(&i))
callbackMutex.Lock()
delete(callbackFuncs, i)
callbackMutex.Unlock()
}
//export goCallback
func goCallback(p unsafe.Pointer) {
(*(*func())(unsafe.Pointer(&p)))()
i := *(*int)(p)
callbackMutex.Lock()
f := callbackFuncs[i]
callbackMutex.Unlock()
if f == nil {
panic("missing callback function")
}
f()
}
func testCallback(t *testing.T) {

View File

@ -62,9 +62,11 @@ func Test8811(t *testing.T) { test8811(t) }
func TestReturnAfterGrow(t *testing.T) { testReturnAfterGrow(t) }
func TestReturnAfterGrowFromGo(t *testing.T) { testReturnAfterGrowFromGo(t) }
func Test9026(t *testing.T) { test9026(t) }
func Test9510(t *testing.T) { test9510(t) }
func Test9557(t *testing.T) { test9557(t) }
func Test10303(t *testing.T) { test10303(t, 10) }
func Test11925(t *testing.T) { test11925(t) }
func Test12030(t *testing.T) { test12030(t) }
func TestGCC68255(t *testing.T) { testGCC68255(t) }
func BenchmarkCgoCall(b *testing.B) { benchCgoCall(b) }

misc/cgo/test/gcc68255.go (new file, 17 lines)

@ -0,0 +1,17 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cgotest
import (
"testing"
"./gcc68255"
)
func testGCC68255(t *testing.T) {
if !gcc68255.F() {
t.Error("C global variable was not initialized")
}
}

View File

@ -0,0 +1,17 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test that it's OK to have C code that does nothing other than
// initialize a global variable. This used to fail with gccgo.
package gcc68255
/*
#include "c.h"
*/
import "C"
func F() bool {
return C.v != nil
}

View File

@ -0,0 +1,8 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
static void f(void) {
}
void (*v)(void) = f;

View File

@ -1,5 +1,5 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "abs_amd64.s"
extern void (*v)(void);

View File

@ -6,6 +6,8 @@
package cgotest
import "runtime"
/*
typedef int *intptr;
@ -39,6 +41,10 @@ import (
)
func test10303(t *testing.T, n int) {
if runtime.Compiler == "gccgo" {
t.Skip("gccgo permits C pointers on the stack")
}
// Run at a few different stack depths just to avoid an unlucky pass
// due to variables ending up on different pages.
if n > 0 {

View File

@ -0,0 +1,24 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test that we can link together two different cgo packages that both
// use the same libgcc function.
package cgotest
import (
"runtime"
"testing"
"./issue9510a"
"./issue9510b"
)
func test9510(t *testing.T) {
if runtime.GOARCH == "arm" {
t.Skip("skipping because libgcc may be a Thumb library")
}
issue9510a.F(1, 1)
issue9510b.F(1, 1)
}

View File

@ -0,0 +1,15 @@
package issue9510a
/*
static double csquare(double a, double b) {
__complex__ double d;
__real__ d = a;
__imag__ d = b;
return __real__ (d * d);
}
*/
import "C"
func F(a, b float64) float64 {
return float64(C.csquare(C.double(a), C.double(b)))
}

View File

@ -0,0 +1,15 @@
package issue9510b
/*
static double csquare(double a, double b) {
__complex__ double d;
__real__ d = a;
__imag__ d = b;
return __real__ (d * d);
}
*/
import "C"
func F(a, b float64) float64 {
return float64(C.csquare(C.double(a), C.double(b)))
}

View File

@ -0,0 +1,13 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux,!arm64 netbsd openbsd
package main
import "syscall"
func dup2(oldfd, newfd int) error {
return syscall.Dup2(oldfd, newfd)
}

View File

@ -0,0 +1,13 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux,arm64
package main
import "syscall"
func dup2(oldfd, newfd int) error {
return syscall.Dup3(oldfd, newfd, 0)
}

View File

@ -31,7 +31,7 @@ func init() {
os.Exit(2)
}
if e := syscall.Dup2(p[0], fd); e != nil {
if e := dup2(p[0], fd); e != nil {
fmt.Fprintf(os.Stderr, "dup2: %v\n", e)
os.Exit(2)
}

View File

@ -81,7 +81,7 @@ GOPATH=$(pwd) go install -buildmode=c-shared $suffix libgo
GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo.$libext src/libgo/libgo.go
binpush libgo.$libext
if [ "$goos" == "linux" ]; then
if [ "$goos" == "linux" ] || [ "$goos" == "android" ] ; then
if readelf -d libgo.$libext | grep TEXTREL >/dev/null; then
echo "libgo.$libext has TEXTREL set"
exit 1
@ -114,7 +114,7 @@ if [ "$output" != "PASS" ]; then
fi
# test2: tests libgo2 which does not export any functions.
GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo2.$libext src/libgo2/libgo2.go
GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo2.$libext libgo2
binpush libgo2.$libext
linkflags="-Wl,--no-as-needed"
if [ "$goos" == "darwin" ]; then

View File

@ -1,9 +1,10 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
/*
#cgo CFLAGS: -fsanitize=memory
#cgo LDFLAGS: -fsanitize=memory
#include <stdint.h>
void f(int32_t *p, int n) {

View File

@ -0,0 +1,35 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
/*
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
void f(int32_t *p, int n) {
int32_t * volatile q = (int32_t *)malloc(sizeof(int32_t) * n);
memcpy(p, q, n * sizeof(*p));
free(q);
}
void g(int32_t *p, int n) {
if (p[4] != 1) {
abort();
}
}
*/
import "C"
import (
"unsafe"
)
func main() {
a := make([]int32, 10)
C.f((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a)))
a[4] = 1
C.g((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a)))
}

View File

@ -0,0 +1,32 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
/*
extern int *GoFn(void);
// Yes, you can have definitions if you use //export, as long as they are weak.
int f(void) __attribute__ ((weak));
int f() {
int *p = GoFn();
if (*p != 12345)
return 0;
return 1;
}
*/
import "C"
//export GoFn
func GoFn() *C.int {
i := C.int(12345)
return &i
}
func main() {
if r := C.f(); r != 1 {
panic(r)
}
}

View File

@ -0,0 +1,50 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
// The memory profiler can call copy from a slice on the system stack,
// which msan used to think meant a reference to uninitialized memory.
/*
#include <time.h>
#include <unistd.h>
extern void Nop(char*);
// Use weak as a hack to permit defining a function even though we use export.
void poison() __attribute__ ((weak));
// Poison the stack.
void poison() {
char a[1024];
Nop(&a[0]);
}
*/
import "C"
import (
"runtime"
)
func main() {
runtime.MemProfileRate = 1
start(100)
}
func start(i int) {
if i == 0 {
return
}
C.poison()
// Tie up a thread.
// We won't actually wait for this sleep to complete.
go func() { C.sleep(1) }()
start(i - 1)
}
//export Nop
func Nop(*C.char) {
}

View File

@ -0,0 +1,36 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
/*
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
void f(int32_t *p, int n) {
int32_t * volatile q = (int32_t *)malloc(sizeof(int32_t) * n);
memcpy(p, q, n * sizeof(*p));
free(q);
}
void g(int32_t *p, int n) {
if (p[4] != 1) {
// We shouldn't get here; msan should stop us first.
exit(0);
}
}
*/
import "C"
import (
"unsafe"
)
func main() {
a := make([]int32, 10)
C.f((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a)))
a[3] = 1
C.g((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a)))
}

View File

@ -10,15 +10,19 @@ set -e
# The sanitizers were originally developed with clang, so prefer it.
CC=cc
if test "$(type -p clang)" != ""; then
if test -x "$(type -p clang)"; then
CC=clang
fi
export CC
if $CC -fsanitize=memory 2>&1 | grep "unrecognized" >& /dev/null; then
TMPDIR=${TMPDIR:-/tmp}
echo > ${TMPDIR}/testsanitizers$$.c
if $CC -fsanitize=memory -c ${TMPDIR}/testsanitizers$$.c -o ${TMPDIR}/testsanitizers$$.o 2>&1 | grep "unrecognized" >& /dev/null; then
echo "skipping msan test: -fsanitize=memory not supported"
rm -f ${TMPDIR}/testsanitizers$$.*
exit 0
fi
rm -f ${TMPDIR}/testsanitizers$$.*
# The memory sanitizer in versions of clang before 3.6 doesn't work with Go.
if $CC --version | grep clang >& /dev/null; then
@ -31,4 +35,36 @@ if $CC --version | grep clang >& /dev/null; then
fi
fi
go run msan.go
status=0
if ! go build -msan std; then
echo "FAIL: build -msan std"
status=1
fi
if ! go run -msan msan.go; then
echo "FAIL: msan"
status=1
fi
if ! go run -msan msan2.go; then
echo "FAIL: msan2"
status=1
fi
if ! go run -msan msan3.go; then
echo "FAIL: msan3"
status=1
fi
if ! go run -msan msan4.go; then
echo "FAIL: msan4"
status=1
fi
if go run -msan msan_fail.go 2>/dev/null; then
echo "FAIL: msan_fail"
status=1
fi
exit $status

View File

@ -21,6 +21,7 @@ import (
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"testing"
"time"
@ -203,11 +204,14 @@ func TestNoTextrel(t *testing.T) {
}
// The install command should have created a "shlibname" file for the
// listed packages (and runtime/cgo) indicating the name of the shared
// library containing it.
// listed packages (and runtime/cgo, and math on arm) indicating the
// name of the shared library containing it.
func TestShlibnameFiles(t *testing.T) {
pkgs := append([]string{}, minpkgs...)
pkgs = append(pkgs, "runtime/cgo")
if runtime.GOARCH == "arm" {
pkgs = append(pkgs, "math")
}
for _, pkg := range pkgs {
shlibnamefile := filepath.Join(gorootInstallDir, pkg+".shlibname")
contentsb, err := ioutil.ReadFile(shlibnamefile)
@ -359,6 +363,36 @@ func TestCgoExecutable(t *testing.T) {
run(t, "cgo executable", "./bin/execgo")
}
func checkPIE(t *testing.T, name string) {
f, err := elf.Open(name)
if err != nil {
t.Fatal("elf.Open failed: ", err)
}
defer f.Close()
if f.Type != elf.ET_DYN {
t.Errorf("%s has type %v, want ET_DYN", name, f.Type)
}
if hasDynTag(f, elf.DT_TEXTREL) {
t.Errorf("%s has DT_TEXTREL set", name)
}
}
func TestTrivialPIE(t *testing.T) {
name := "trivial_pie"
goCmd(t, "build", "-buildmode=pie", "-o="+name, "trivial")
defer os.Remove(name)
run(t, name, "./"+name)
checkPIE(t, name)
}
func TestCgoPIE(t *testing.T) {
name := "cgo_pie"
goCmd(t, "build", "-buildmode=pie", "-o="+name, "execgo")
defer os.Remove(name)
run(t, name, "./"+name)
checkPIE(t, name)
}
// Build a GOPATH package into a shared library that links against the goroot runtime
// and an executable that links against both.
func TestGopathShlib(t *testing.T) {

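checkPIE above calls a hasDynTag helper that lives elsewhere in the testshared test file and is not shown in this hunk. A minimal sketch of one way such a helper can be written with debug/elf follows; the package placement and body are illustrative, not the test's actual code.

package shared_test // illustrative placement; the real helper sits in shared_test.go

import "debug/elf"

// hasDynTag reports whether f's dynamic section carries the given tag
// (for example elf.DT_TEXTREL), by walking SHT_DYNAMIC entry by entry.
func hasDynTag(f *elf.File, tag elf.DynTag) bool {
	ds := f.SectionByType(elf.SHT_DYNAMIC)
	if ds == nil {
		return false
	}
	d, err := ds.Data()
	if err != nil {
		return false
	}
	for {
		switch f.Class {
		case elf.ELFCLASS64:
			if len(d) < 16 {
				return false
			}
			if elf.DynTag(f.ByteOrder.Uint64(d[:8])) == tag {
				return true
			}
			d = d[16:]
		case elf.ELFCLASS32:
			if len(d) < 8 {
				return false
			}
			if elf.DynTag(f.ByteOrder.Uint32(d[:4])) == tag {
				return true
			}
			d = d[8:]
		default:
			return false
		}
	}
}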
View File

@ -103,7 +103,7 @@ func main() {
func getenv(envvar string) string {
s := os.Getenv(envvar)
if s == "" {
log.Fatalf("%s not set\nrun $GOROOT/misc/ios/detect.go to attempt to autodetect", s)
log.Fatalf("%s not set\nrun $GOROOT/misc/ios/detect.go to attempt to autodetect", envvar)
}
return s
}
@ -160,9 +160,6 @@ func run(bin string, args []string) (err error) {
}
defer os.Chdir(oldwd)
type waitPanic struct {
err error
}
defer func() {
if r := recover(); r != nil {
if w, ok := r.(waitPanic); ok {
@ -174,14 +171,96 @@ func run(bin string, args []string) (err error) {
}()
defer exec.Command("killall", "ios-deploy").Run() // cleanup
exec.Command("killall", "ios-deploy").Run()
var opts options
opts, args = parseArgs(args)
// ios-deploy invokes lldb to give us a shell session with the app.
cmd = exec.Command(
s, err := newSession(appdir, args, opts)
if err != nil {
return err
}
defer func() {
b := s.out.Bytes()
if err == nil && !debug {
i := bytes.Index(b, []byte("(lldb) process continue"))
if i > 0 {
b = b[i:]
}
}
os.Stdout.Write(b)
}()
// Script LLDB. Oh dear.
s.do(`process handle SIGHUP --stop false --pass true --notify false`)
s.do(`process handle SIGPIPE --stop false --pass true --notify false`)
s.do(`process handle SIGUSR1 --stop false --pass true --notify false`)
s.do(`process handle SIGSEGV --stop false --pass true --notify false`) // does not work
s.do(`process handle SIGBUS --stop false --pass true --notify false`) // does not work
if opts.lldb {
_, err := io.Copy(s.in, os.Stdin)
if err != io.EOF {
return err
}
return nil
}
s.do(`breakpoint set -n getwd`) // in runtime/cgo/gcc_darwin_arm.go
s.doCmd("run", "stop reason = breakpoint", 20*time.Second)
// Move the current working directory into the faux gopath.
if pkgpath != "src" {
s.do(`breakpoint delete 1`)
s.do(`expr char* $mem = (char*)malloc(512)`)
s.do(`expr $mem = (char*)getwd($mem, 512)`)
s.do(`expr $mem = (char*)strcat($mem, "/` + pkgpath + `")`)
s.do(`call (void)chdir($mem)`)
}
startTestsLen := s.out.Len()
fmt.Fprintln(s.in, `process continue`)
passed := func(out *buf) bool {
// Just to make things fun, lldb sometimes translates \n into \r\n.
return s.out.LastIndex([]byte("\nPASS\n")) > startTestsLen ||
s.out.LastIndex([]byte("\nPASS\r")) > startTestsLen ||
s.out.LastIndex([]byte("\n(lldb) PASS\n")) > startTestsLen ||
s.out.LastIndex([]byte("\n(lldb) PASS\r")) > startTestsLen
}
err = s.wait("test completion", passed, opts.timeout)
if passed(s.out) {
// The returned lldb error code is usually non-zero.
// We check for test success by scanning for the final
// PASS returned by the test harness, assuming the worst
// in its absence.
return nil
}
return err
}
type lldbSession struct {
cmd *exec.Cmd
in *os.File
out *buf
timedout chan struct{}
exited chan error
}
func newSession(appdir string, args []string, opts options) (*lldbSession, error) {
lldbr, in, err := os.Pipe()
if err != nil {
return nil, err
}
s := &lldbSession{
in: in,
out: new(buf),
exited: make(chan error),
}
s.cmd = exec.Command(
// lldb tries to be clever with terminals.
// So we wrap it in script(1) and be clever
// right back at it.
@ -198,267 +277,120 @@ func run(bin string, args []string) (err error) {
"--bundle", appdir,
)
if debug {
log.Println(strings.Join(cmd.Args, " "))
log.Println(strings.Join(s.cmd.Args, " "))
}
lldbr, lldb, err := os.Pipe()
if err != nil {
return err
}
w := new(bufWriter)
var out io.Writer = s.out
if opts.lldb {
mw := io.MultiWriter(w, os.Stderr)
cmd.Stdout = mw
cmd.Stderr = mw
} else {
cmd.Stdout = w
cmd.Stderr = w // everything of interest is on stderr
out = io.MultiWriter(out, os.Stderr)
}
cmd.Stdin = lldbr
s.cmd.Stdout = out
s.cmd.Stderr = out // everything of interest is on stderr
s.cmd.Stdin = lldbr
if err := cmd.Start(); err != nil {
return fmt.Errorf("ios-deploy failed to start: %v", err)
if err := s.cmd.Start(); err != nil {
return nil, fmt.Errorf("ios-deploy failed to start: %v", err)
}
// Manage the -test.timeout here, outside of the test. There are a lot
// of moving parts in an iOS test harness (notably lldb) that can
// swallow useful stdio or cause its own ruckus.
var timedout chan struct{}
if opts.timeout > 1*time.Second {
timedout = make(chan struct{})
s.timedout = make(chan struct{})
time.AfterFunc(opts.timeout-1*time.Second, func() {
close(timedout)
close(s.timedout)
})
}
exited := make(chan error)
go func() {
exited <- cmd.Wait()
s.exited <- s.cmd.Wait()
}()
waitFor := func(stage, str string, timeout time.Duration) error {
cond := func(out *buf) bool {
i0 := s.out.LastIndex([]byte("(lldb)"))
i1 := s.out.LastIndex([]byte("fruitstrap"))
i2 := s.out.LastIndex([]byte(" connect"))
return i0 > 0 && i1 > 0 && i2 > 0
}
if err := s.wait("lldb start", cond, 5*time.Second); err != nil {
fmt.Printf("lldb start error: %v\n", err)
return nil, errRetry
}
return s, nil
}
func (s *lldbSession) do(cmd string) { s.doCmd(cmd, "(lldb)", 0) }
func (s *lldbSession) doCmd(cmd string, waitFor string, extraTimeout time.Duration) {
startLen := s.out.Len()
fmt.Fprintln(s.in, cmd)
cond := func(out *buf) bool {
i := s.out.LastIndex([]byte(waitFor))
return i > startLen
}
if err := s.wait(fmt.Sprintf("running cmd %q", cmd), cond, extraTimeout); err != nil {
panic(waitPanic{err})
}
}
func (s *lldbSession) wait(reason string, cond func(out *buf) bool, extraTimeout time.Duration) error {
doTimeout := 1*time.Second + extraTimeout
doTimedout := time.After(doTimeout)
for {
select {
case <-timedout:
w.printBuf()
if p := cmd.Process; p != nil {
case <-s.timedout:
if p := s.cmd.Process; p != nil {
p.Kill()
}
return fmt.Errorf("timeout (stage %s)", stage)
case err := <-exited:
w.printBuf()
return fmt.Errorf("failed (stage %s): %v", stage, err)
case i := <-w.find(str, timeout):
if i < 0 {
log.Printf("timed out on stage %q, retrying", stage)
return errRetry
return fmt.Errorf("test timeout (%s)", reason)
case <-doTimedout:
return fmt.Errorf("command timeout (%s for %v)", reason, doTimeout)
case err := <-s.exited:
return fmt.Errorf("exited (%s: %v)", reason, err)
default:
if cond(s.out) {
return nil
}
w.clearTo(i + len(str))
return nil
time.Sleep(20 * time.Millisecond)
}
}
do := func(cmd string) {
fmt.Fprintln(lldb, cmd)
if err := waitFor(fmt.Sprintf("prompt after %q", cmd), "(lldb)", 0); err != nil {
panic(waitPanic{err})
}
}
// Wait for installation and connection.
if err := waitFor("ios-deploy before run", "(lldb)", 0); err != nil {
// Retry if we see a rare and longstanding ios-deploy bug.
// https://github.com/phonegap/ios-deploy/issues/11
// Assertion failed: (AMDeviceStartService(device, CFSTR("com.apple.debugserver"), &gdbfd, NULL) == 0)
log.Printf("%v, retrying", err)
return errRetry
}
// Script LLDB. Oh dear.
do(`process handle SIGHUP --stop false --pass true --notify false`)
do(`process handle SIGPIPE --stop false --pass true --notify false`)
do(`process handle SIGUSR1 --stop false --pass true --notify false`)
do(`process handle SIGSEGV --stop false --pass true --notify false`) // does not work
do(`process handle SIGBUS --stop false --pass true --notify false`) // does not work
if opts.lldb {
_, err := io.Copy(lldb, os.Stdin)
if err != io.EOF {
return err
}
return nil
}
do(`breakpoint set -n getwd`) // in runtime/cgo/gcc_darwin_arm.go
fmt.Fprintln(lldb, `run`)
if err := waitFor("br getwd", "stop reason = breakpoint", 20*time.Second); err != nil {
// At this point we see several flaky errors from the iOS
// build infrastructure. The most common is never reaching
// the breakpoint, which we catch with a timeout. Very
// occasionally lldb can produce errors like:
//
// Breakpoint 1: no locations (pending).
// WARNING: Unable to resolve breakpoint to any actual locations.
//
// As no actual test code has been executed by this point,
// we treat all errors as recoverable.
if err != errRetry {
log.Printf("%v, retrying", err)
err = errRetry
}
return err
}
if err := waitFor("br getwd prompt", "(lldb)", 0); err != nil {
return err
}
// Move the current working directory into the faux gopath.
if pkgpath != "src" {
do(`breakpoint delete 1`)
do(`expr char* $mem = (char*)malloc(512)`)
do(`expr $mem = (char*)getwd($mem, 512)`)
do(`expr $mem = (char*)strcat($mem, "/` + pkgpath + `")`)
do(`call (void)chdir($mem)`)
}
// Run the tests.
w.trimSuffix("(lldb) ")
fmt.Fprintln(lldb, `process continue`)
// Wait for the test to complete.
select {
case <-timedout:
w.printBuf()
if p := cmd.Process; p != nil {
p.Kill()
}
return errors.New("timeout running tests")
case <-w.find("\nPASS", 0):
passed := w.isPass()
w.printBuf()
if passed {
return nil
}
return errors.New("test failure")
case err := <-exited:
// The returned lldb error code is usually non-zero.
// We check for test success by scanning for the final
// PASS returned by the test harness, assuming the worst
// in its absence.
if w.isPass() {
err = nil
} else if err == nil {
err = errors.New("test failure")
}
w.printBuf()
return err
}
}
type bufWriter struct {
mu sync.Mutex
buf []byte
suffix []byte // remove from each Write
findTxt []byte // search buffer on each Write
findCh chan int // report find position
findAfter *time.Timer
type buf struct {
mu sync.Mutex
buf []byte
}
func (w *bufWriter) Write(in []byte) (n int, err error) {
func (w *buf) Write(in []byte) (n int, err error) {
w.mu.Lock()
defer w.mu.Unlock()
n = len(in)
in = bytes.TrimSuffix(in, w.suffix)
if debug {
inTxt := strings.Replace(string(in), "\n", "\\n", -1)
findTxt := strings.Replace(string(w.findTxt), "\n", "\\n", -1)
fmt.Printf("debug --> %s <-- debug (findTxt='%s')\n", inTxt, findTxt)
}
w.buf = append(w.buf, in...)
if len(w.findTxt) > 0 {
if i := bytes.Index(w.buf, w.findTxt); i >= 0 {
w.findCh <- i
close(w.findCh)
w.findTxt = nil
w.findCh = nil
if w.findAfter != nil {
w.findAfter.Stop()
w.findAfter = nil
}
}
}
return n, nil
return len(in), nil
}
func (w *bufWriter) trimSuffix(p string) {
func (w *buf) LastIndex(sep []byte) int {
w.mu.Lock()
defer w.mu.Unlock()
w.suffix = []byte(p)
return bytes.LastIndex(w.buf, sep)
}
func (w *bufWriter) printBuf() {
w.mu.Lock()
defer w.mu.Unlock()
fmt.Fprintf(os.Stderr, "%s", w.buf)
w.buf = nil
}
func (w *bufWriter) clearTo(i int) {
w.mu.Lock()
defer w.mu.Unlock()
w.buf = w.buf[i:]
}
// find returns a channel that will have exactly one byte index sent
// to it when the text str appears in the buffer. If the text does not
// appear before timeout, -1 is sent.
//
// A timeout of zero means no timeout.
func (w *bufWriter) find(str string, timeout time.Duration) <-chan int {
w.mu.Lock()
defer w.mu.Unlock()
if len(w.findTxt) > 0 {
panic(fmt.Sprintf("find(%s): already trying to find %s", str, w.findTxt))
}
txt := []byte(str)
ch := make(chan int, 1)
if i := bytes.Index(w.buf, txt); i >= 0 {
ch <- i
close(ch)
} else {
w.findTxt = txt
w.findCh = ch
if timeout > 0 {
w.findAfter = time.AfterFunc(timeout, func() {
w.mu.Lock()
defer w.mu.Unlock()
if w.findCh == ch {
w.findTxt = nil
w.findCh = nil
w.findAfter = nil
ch <- -1
close(ch)
}
})
}
}
return ch
}
func (w *bufWriter) isPass() bool {
func (w *buf) Bytes() []byte {
w.mu.Lock()
defer w.mu.Unlock()
// The final stdio of lldb is non-deterministic, so we
// scan the whole buffer.
//
// Just to make things fun, lldb sometimes translates \n
// into \r\n.
return bytes.Contains(w.buf, []byte("\nPASS\n")) || bytes.Contains(w.buf, []byte("\nPASS\r"))
b := make([]byte, len(w.buf))
copy(b, w.buf)
return b
}
func (w *buf) Len() int {
w.mu.Lock()
defer w.mu.Unlock()
return len(w.buf)
}
type waitPanic struct {
err error
}
type options struct {

79
misc/sortac/sortac.go Normal file
View File

@ -0,0 +1,79 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Sortac sorts the AUTHORS and CONTRIBUTORS files.
//
// Usage:
//
// sortac [file...]
//
// Sortac sorts the named files in place.
// If given no arguments, it sorts standard input to standard output.
package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"golang.org/x/text/collate"
"golang.org/x/text/language"
)
func main() {
log.SetFlags(0)
log.SetPrefix("sortac: ")
flag.Parse()
args := flag.Args()
if len(args) == 0 {
os.Stdout.Write(sortAC(os.Stdin))
} else {
for _, arg := range args {
f, err := os.Open(arg)
if err != nil {
log.Fatal(err)
}
sorted := sortAC(f)
f.Close()
if err := ioutil.WriteFile(arg, sorted, 0644); err != nil {
log.Fatal(err)
}
}
}
}
func sortAC(r io.Reader) []byte {
bs := bufio.NewScanner(r)
var header []string
var lines []string
for bs.Scan() {
t := bs.Text()
lines = append(lines, t)
if t == "# Please keep the list sorted." {
header = lines
lines = nil
continue
}
}
if err := bs.Err(); err != nil {
log.Fatal(err)
}
var out bytes.Buffer
c := collate.New(language.Und, collate.Loose)
c.SortStrings(lines)
for _, l := range header {
fmt.Fprintln(&out, l)
}
for _, l := range lines {
fmt.Fprintln(&out, l)
}
return out.Bytes()
}
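sortAC sorts with a Unicode collator rather than raw byte order so that accented names land where a reader expects them. A minimal sketch of the difference, reusing the same collate calls as sortAC; the sample names are made up for illustration.

package main

import (
	"fmt"
	"sort"

	"golang.org/x/text/collate"
	"golang.org/x/text/language"
)

func main() {
	names := []string{"Zoe", "Émile", "Anna"}

	byBytes := append([]string(nil), names...)
	// sort.Strings compares raw UTF-8 bytes, so "Émile" (É = 0xC3 0x89)
	// ends up after "Zoe".
	sort.Strings(byBytes)

	// The collator folds É toward E, so "Émile" lands between "Anna" and "Zoe".
	c := collate.New(language.Und, collate.Loose)
	c.SortStrings(names)

	fmt.Println(byBytes) // [Anna Zoe Émile]
	fmt.Println(names)   // [Anna Émile Zoe]
}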

View File

@ -23,6 +23,14 @@ if [ "$GOOS" != "android" ]; then
exit 1
fi
if [ -z $GOARM ]; then
export GOARM=7
fi
if [ "$GOARM" != "7" ]; then
echo "android only supports GOARM=7, got GOARM=$GOARM" 1>&2
exit 1
fi
export CGO_ENABLED=1
unset GOBIN
@ -56,7 +64,16 @@ mkdir -p $FAKE_GOROOT/pkg
cp -a "${GOROOT}/src" "${FAKE_GOROOT}/"
cp -a "${GOROOT}/test" "${FAKE_GOROOT}/"
cp -a "${GOROOT}/lib" "${FAKE_GOROOT}/"
cp -a "${GOROOT}/pkg/android_$GOARCH" "${FAKE_GOROOT}/pkg/"
# For android, the go tool will install the compiled package in
# pkg/android_${GOARCH}_shared directory by default, not in
# the usual pkg/${GOOS}_${GOARCH}. Some tests in src/go/* assume
# the compiled packages were installed in the usual places.
# Instead of reflecting this exception into the go/* packages,
# we copy the compiled packages into the usual places.
cp -a "${GOROOT}/pkg/android_${GOARCH}_shared" "${FAKE_GOROOT}/pkg/"
mv "${FAKE_GOROOT}/pkg/android_${GOARCH}_shared" "${FAKE_GOROOT}/pkg/android_${GOARCH}"
echo '# Syncing test files to android device'
adb shell mkdir -p /data/local/tmp/goroot
time adb sync data &> /dev/null

View File

@ -446,16 +446,45 @@ func (tr *Reader) octal(b []byte) int64 {
return int64(x)
}
// skipUnread skips any unread bytes in the existing file entry, as well as any alignment padding.
func (tr *Reader) skipUnread() {
nr := tr.numBytes() + tr.pad // number of bytes to skip
// skipUnread skips any unread bytes in the existing file entry, as well as any
// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is
// encountered in the data portion; it is okay to hit io.EOF in the padding.
//
// Note that this function still works properly even when sparse files are being
// used since numBytes returns the bytes remaining in the underlying io.Reader.
func (tr *Reader) skipUnread() error {
dataSkip := tr.numBytes() // Number of data bytes to skip
totalSkip := dataSkip + tr.pad // Total number of bytes to skip
tr.curr, tr.pad = nil, 0
if sr, ok := tr.r.(io.Seeker); ok {
if _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {
return
// If possible, Seek to the last byte before the end of the data section.
// Do this because Seek is often lazy about reporting errors; this will mask
// the fact that the tar stream may be truncated. We can rely on the
// io.CopyN done shortly afterwards to trigger any IO errors.
var seekSkipped int64 // Number of bytes skipped via Seek
if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 {
// Not all io.Seeker can actually Seek. For example, os.Stdin implements
// io.Seeker, but calling Seek always returns an error and performs
// no action. Thus, we try an innocent seek to the current position
// to see if Seek is really supported.
pos1, err := sr.Seek(0, os.SEEK_CUR)
if err == nil {
// Seek seems supported, so perform the real Seek.
pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR)
if err != nil {
tr.err = err
return tr.err
}
seekSkipped = pos2 - pos1
}
}
_, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)
var copySkipped int64 // Number of bytes skipped via CopyN
copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip {
tr.err = io.ErrUnexpectedEOF
}
return tr.err
}
func (tr *Reader) verifyChecksum(header []byte) bool {
@ -468,18 +497,25 @@ func (tr *Reader) verifyChecksum(header []byte) bool {
return given == unsigned || given == signed
}
// readHeader reads the next block header and assumes that the underlying reader
// is already aligned to a block boundary.
//
// The err will be set to io.EOF only when one of the following occurs:
// * Exactly 0 bytes are read and EOF is hit.
// * Exactly 1 block of zeros is read and EOF is hit.
// * At least 2 blocks of zeros are read.
func (tr *Reader) readHeader() *Header {
header := tr.hdrBuff[:]
copy(header, zeroBlock)
if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
return nil
return nil // io.EOF is okay here
}
// Two blocks of zero bytes marks the end of the archive.
if bytes.Equal(header, zeroBlock[0:blockSize]) {
if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil {
return nil
return nil // io.EOF is okay here
}
if bytes.Equal(header, zeroBlock[0:blockSize]) {
tr.err = io.EOF

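The skipUnread change above relies on an "innocent seek" probe: some values satisfy io.Seeker without being able to seek, so a zero-offset Seek is issued first to find out. A minimal standalone sketch of that probe; canReallySeek is a hypothetical helper name, not part of the package.

package main

import (
	"fmt"
	"io"
	"os"
)

// canReallySeek reports whether r supports real seeking. A zero-offset
// seek from the current position changes nothing when it succeeds, and
// fails on values that only nominally implement io.Seeker.
func canReallySeek(r io.Reader) bool {
	sr, ok := r.(io.Seeker)
	if !ok {
		return false
	}
	_, err := sr.Seek(0, os.SEEK_CUR) // same constant as skipUnread uses
	return err == nil
}

func main() {
	f, err := os.Open(os.DevNull)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	fmt.Println(canReallySeek(f))        // true: regular files and devices seek
	fmt.Println(canReallySeek(os.Stdin)) // usually false when stdin is a pipe
}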
View File

@ -422,35 +422,6 @@ func TestPartialRead(t *testing.T) {
}
}
func TestNonSeekable(t *testing.T) {
test := gnuTarTest
f, err := os.Open(test.file)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer f.Close()
type readerOnly struct {
io.Reader
}
tr := NewReader(readerOnly{f})
nread := 0
for ; ; nread++ {
_, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
if nread != len(test.headers) {
t.Errorf("Didn't process all files\nexpected: %d\nprocessed %d\n", len(test.headers), nread)
}
}
func TestParsePAXHeader(t *testing.T) {
paxTests := [][3]string{
{"a", "a=name", "10 a=name\n"}, // Test case involving multiple acceptable lengths
@ -803,3 +774,130 @@ func TestUninitializedRead(t *testing.T) {
}
}
type reader struct{ io.Reader }
type readSeeker struct{ io.ReadSeeker }
type readBadSeeker struct{ io.ReadSeeker }
func (rbs *readBadSeeker) Seek(int64, int) (int64, error) { return 0, fmt.Errorf("illegal seek") }
// TestReadTruncation tests the ending condition on various truncated files and
// that truncated files are still detected even if the underlying io.Reader
// satisfies io.Seeker.
func TestReadTruncation(t *testing.T) {
var ss []string
for _, p := range []string{
"testdata/gnu.tar",
"testdata/ustar-file-reg.tar",
"testdata/pax-path-hdr.tar",
"testdata/sparse-formats.tar",
} {
buf, err := ioutil.ReadFile(p)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
ss = append(ss, string(buf))
}
data1, data2, pax, sparse := ss[0], ss[1], ss[2], ss[3]
data2 += strings.Repeat("\x00", 10*512)
trash := strings.Repeat("garbage ", 64) // Exactly 512 bytes
var vectors = []struct {
input string // Input stream
cnt int // Expected number of headers read
err error // Expected error outcome
}{
{"", 0, io.EOF}, // Empty file is a "valid" tar file
{data1[:511], 0, io.ErrUnexpectedEOF},
{data1[:512], 1, io.ErrUnexpectedEOF},
{data1[:1024], 1, io.EOF},
{data1[:1536], 2, io.ErrUnexpectedEOF},
{data1[:2048], 2, io.EOF},
{data1, 2, io.EOF},
{data1[:2048] + data2[:1536], 3, io.EOF},
{data2[:511], 0, io.ErrUnexpectedEOF},
{data2[:512], 1, io.ErrUnexpectedEOF},
{data2[:1195], 1, io.ErrUnexpectedEOF},
{data2[:1196], 1, io.EOF}, // Exact end of data and start of padding
{data2[:1200], 1, io.EOF},
{data2[:1535], 1, io.EOF},
{data2[:1536], 1, io.EOF}, // Exact end of padding
{data2[:1536] + trash[:1], 1, io.ErrUnexpectedEOF},
{data2[:1536] + trash[:511], 1, io.ErrUnexpectedEOF},
{data2[:1536] + trash, 1, ErrHeader},
{data2[:2048], 1, io.EOF}, // Exactly 1 empty block
{data2[:2048] + trash[:1], 1, io.ErrUnexpectedEOF},
{data2[:2048] + trash[:511], 1, io.ErrUnexpectedEOF},
{data2[:2048] + trash, 1, ErrHeader},
{data2[:2560], 1, io.EOF}, // Exactly 2 empty blocks (normal end-of-stream)
{data2[:2560] + trash[:1], 1, io.EOF},
{data2[:2560] + trash[:511], 1, io.EOF},
{data2[:2560] + trash, 1, io.EOF},
{data2[:3072], 1, io.EOF},
{pax, 0, io.EOF}, // PAX header without data is a "valid" tar file
{pax + trash[:1], 0, io.ErrUnexpectedEOF},
{pax + trash[:511], 0, io.ErrUnexpectedEOF},
{sparse[:511], 0, io.ErrUnexpectedEOF},
// TODO(dsnet): This should pass, but currently fails.
// {sparse[:512], 0, io.ErrUnexpectedEOF},
{sparse[:3584], 1, io.EOF},
{sparse[:9200], 1, io.EOF}, // Terminate in padding of sparse header
{sparse[:9216], 1, io.EOF},
{sparse[:9728], 2, io.ErrUnexpectedEOF},
{sparse[:10240], 2, io.EOF},
{sparse[:11264], 2, io.ErrUnexpectedEOF},
{sparse, 5, io.EOF},
{sparse + trash, 5, io.EOF},
}
for i, v := range vectors {
for j := 0; j < 6; j++ {
var tr *Reader
var s1, s2 string
switch j {
case 0:
tr = NewReader(&reader{strings.NewReader(v.input)})
s1, s2 = "io.Reader", "auto"
case 1:
tr = NewReader(&reader{strings.NewReader(v.input)})
s1, s2 = "io.Reader", "manual"
case 2:
tr = NewReader(&readSeeker{strings.NewReader(v.input)})
s1, s2 = "io.ReadSeeker", "auto"
case 3:
tr = NewReader(&readSeeker{strings.NewReader(v.input)})
s1, s2 = "io.ReadSeeker", "manual"
case 4:
tr = NewReader(&readBadSeeker{strings.NewReader(v.input)})
s1, s2 = "ReadBadSeeker", "auto"
case 5:
tr = NewReader(&readBadSeeker{strings.NewReader(v.input)})
s1, s2 = "ReadBadSeeker", "manual"
}
var cnt int
var err error
for {
if _, err = tr.Next(); err != nil {
break
}
cnt++
if s2 == "manual" {
if _, err = io.Copy(ioutil.Discard, tr); err != nil {
break
}
}
}
if err != v.err {
t.Errorf("test %d, NewReader(%s(...)) with %s discard: got %v, want %v",
i, s1, s2, err, v.err)
}
if cnt != v.cnt {
t.Errorf("test %d, NewReader(%s(...)) with %s discard: got %d headers, want %d headers",
i, s1, s2, cnt, v.cnt)
}
}
}
}

Binary file not shown.

Binary file not shown.

View File

@ -12,8 +12,8 @@ import (
"errors"
"fmt"
"io"
"os"
"path"
"sort"
"strconv"
"strings"
"time"
@ -288,11 +288,11 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) erro
// succeed, and seems harmless enough.
ext.ModTime = hdr.ModTime
// The spec asks that we namespace our pseudo files
// with the current pid.
pid := os.Getpid()
// with the current pid. However, this results in differing outputs
// for identical inputs. As such, the constant 0 is now used instead.
// golang.org/issue/12358
dir, file := path.Split(hdr.Name)
fullName := path.Join(dir,
fmt.Sprintf("PaxHeaders.%d", pid), file)
fullName := path.Join(dir, "PaxHeaders.0", file)
ascii := toASCII(fullName)
if len(ascii) > 100 {
@ -302,8 +302,15 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) erro
// Construct the body
var buf bytes.Buffer
for k, v := range paxHeaders {
fmt.Fprint(&buf, paxHeader(k+"="+v))
// Keys are sorted before writing to body to allow deterministic output.
var keys []string
for k := range paxHeaders {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
fmt.Fprint(&buf, paxHeader(k+"="+paxHeaders[k]))
}
ext.Size = int64(len(buf.Bytes()))

View File

@ -11,6 +11,7 @@ import (
"io/ioutil"
"os"
"reflect"
"sort"
"strings"
"testing"
"testing/iotest"
@ -291,7 +292,7 @@ func TestPax(t *testing.T) {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get a long name back out of the archive.
@ -330,7 +331,7 @@ func TestPaxSymlink(t *testing.T) {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get a long name back out of the archive.
@ -380,7 +381,7 @@ func TestPaxNonAscii(t *testing.T) {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.")) {
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// Test that we can get a long name back out of the archive.
@ -439,6 +440,52 @@ func TestPaxXattrs(t *testing.T) {
}
}
func TestPaxHeadersSorted(t *testing.T) {
fileinfo, err := os.Stat("testdata/small.txt")
if err != nil {
t.Fatal(err)
}
hdr, err := FileInfoHeader(fileinfo, "")
if err != nil {
t.Fatalf("os.Stat: %v", err)
}
contents := strings.Repeat(" ", int(hdr.Size))
hdr.Xattrs = map[string]string{
"foo": "foo",
"bar": "bar",
"baz": "baz",
"qux": "qux",
}
var buf bytes.Buffer
writer := NewWriter(&buf)
if err := writer.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err = writer.Write([]byte(contents)); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
// Simple test to make sure PAX extensions are in effect
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
t.Fatal("Expected at least one PAX header to be written.")
}
// xattr bar should always appear before others
indices := []int{
bytes.Index(buf.Bytes(), []byte("bar=bar")),
bytes.Index(buf.Bytes(), []byte("baz=baz")),
bytes.Index(buf.Bytes(), []byte("foo=foo")),
bytes.Index(buf.Bytes(), []byte("qux=qux")),
}
if !sort.IntsAreSorted(indices) {
t.Fatal("PAX headers are not sorted")
}
}
func TestPAXHeader(t *testing.T) {
medName := strings.Repeat("CD", 50)
longName := strings.Repeat("AB", 100)

View File

@ -8,6 +8,7 @@ import (
"cmd/internal/obj"
"cmd/internal/obj/arm"
"cmd/internal/obj/arm64"
"cmd/internal/obj/mips"
"cmd/internal/obj/ppc64"
"cmd/internal/obj/x86"
"fmt"
@ -65,6 +66,14 @@ func Set(GOARCH string) *Arch {
return archArm()
case "arm64":
return archArm64()
case "mips64":
a := archMips64()
a.LinkArch = &mips.Linkmips64
return a
case "mips64le":
a := archMips64()
a.LinkArch = &mips.Linkmips64le
return a
case "ppc64":
a := archPPC64()
a.LinkArch = &ppc64.Linkppc64
@ -363,3 +372,57 @@ func archPPC64() *Arch {
IsJump: jumpPPC64,
}
}
func archMips64() *Arch {
register := make(map[string]int16)
// Create maps for easy lookup of instruction names etc.
// Note that there is no list of names as there is for x86.
for i := mips.REG_R0; i <= mips.REG_R31; i++ {
register[obj.Rconv(i)] = int16(i)
}
for i := mips.REG_F0; i <= mips.REG_F31; i++ {
register[obj.Rconv(i)] = int16(i)
}
for i := mips.REG_M0; i <= mips.REG_M31; i++ {
register[obj.Rconv(i)] = int16(i)
}
for i := mips.REG_FCR0; i <= mips.REG_FCR31; i++ {
register[obj.Rconv(i)] = int16(i)
}
register["HI"] = mips.REG_HI
register["LO"] = mips.REG_LO
// Pseudo-registers.
register["SB"] = RSB
register["FP"] = RFP
register["PC"] = RPC
// Avoid unintentionally clobbering g using R30.
delete(register, "R30")
register["g"] = mips.REG_R30
registerPrefix := map[string]bool{
"F": true,
"FCR": true,
"M": true,
"R": true,
}
instructions := make(map[string]int)
for i, s := range obj.Anames {
instructions[s] = i
}
for i, s := range mips.Anames {
if i >= obj.A_ARCHSPECIFIC {
instructions[s] = i + obj.ABaseMIPS64
}
}
// Annoying alias.
instructions["JAL"] = mips.AJAL
return &Arch{
LinkArch: &mips.Linkmips64,
Instructions: instructions,
Register: register,
RegisterPrefix: registerPrefix,
RegisterNumber: mipsRegisterNumber,
IsJump: jumpMIPS64,
}
}

View File

@ -0,0 +1,64 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file encapsulates some of the odd characteristics of the
// 64-bit MIPS (MIPS64) instruction set, to minimize its interaction
// with the core of the assembler.
package arch
import "cmd/internal/obj/mips"
func jumpMIPS64(word string) bool {
switch word {
case "BEQ", "BFPF", "BFPT", "BGEZ", "BGEZAL", "BGTZ", "BLEZ", "BLTZ", "BLTZAL", "BNE", "JMP", "JAL", "CALL":
return true
}
return false
}
// IsMIPS64CMP reports whether the op (as defined by an mips.A* constant) is
// one of the CMP instructions that require special handling.
func IsMIPS64CMP(op int) bool {
switch op {
case mips.ACMPEQF, mips.ACMPEQD, mips.ACMPGEF, mips.ACMPGED,
mips.ACMPGTF, mips.ACMPGTD:
return true
}
return false
}
// IsMIPS64MUL reports whether the op (as defined by an mips.A* constant) is
// one of the MUL/DIV/REM instructions that require special handling.
func IsMIPS64MUL(op int) bool {
switch op {
case mips.AMUL, mips.AMULU, mips.AMULV, mips.AMULVU,
mips.ADIV, mips.ADIVU, mips.ADIVV, mips.ADIVVU,
mips.AREM, mips.AREMU, mips.AREMV, mips.AREMVU:
return true
}
return false
}
func mipsRegisterNumber(name string, n int16) (int16, bool) {
switch name {
case "F":
if 0 <= n && n <= 31 {
return mips.REG_F0 + n, true
}
case "FCR":
if 0 <= n && n <= 31 {
return mips.REG_FCR0 + n, true
}
case "M":
if 0 <= n && n <= 31 {
return mips.REG_M0 + n, true
}
case "R":
if 0 <= n && n <= 31 {
return mips.REG_R0 + n, true
}
}
return 0, false
}

View File

@ -373,6 +373,14 @@ func (p *Parser) asmJump(op int, cond string, a []obj.Addr) {
prog.Reg = reg
break
}
if p.arch.Thechar == '0' {
// 3-operand jumps.
// First two must be registers
target = &a[2]
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
break
}
fallthrough
default:
p.errorf("wrong number of arguments to %s instruction", obj.Aconv(op))
@ -509,11 +517,21 @@ func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
break
} else if p.arch.Thechar == '0' {
if arch.IsMIPS64CMP(op) || arch.IsMIPS64MUL(op) {
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
break
}
}
prog.From = a[0]
prog.To = a[1]
case 3:
switch p.arch.Thechar {
case '0':
prog.From = a[0]
prog.Reg = p.getRegister(prog, op, &a[1])
prog.To = a[2]
case '5':
// Special cases.
if arch.IsARMSTREX(op) {

View File

@ -89,3 +89,7 @@ func TestAMD64EndToEnd(t *testing.T) {
func Test386EndToEnd(t *testing.T) {
testEndToEnd(t, "386")
}
func TestMIPS64EndToEnd(t *testing.T) {
testEndToEnd(t, "mips64")
}

View File

@ -65,6 +65,11 @@ func TestPPC64OperandParser(t *testing.T) {
testOperandParser(t, parser, ppc64OperandTests)
}
func TestMIPS64OperandParser(t *testing.T) {
parser := newParser("mips64")
testOperandParser(t, parser, mips64OperandTests)
}
type operandTest struct {
input, output string
}
@ -435,3 +440,86 @@ var arm64OperandTests = []operandTest{
{"(R29, RSP)", "(R29, RSP)"},
{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
}
var mips64OperandTests = []operandTest{
{"$((1<<63)-1)", "$9223372036854775807"},
{"$(-64*1024)", "$-65536"},
{"$(1024 * 8)", "$8192"},
{"$-1", "$-1"},
{"$-24(R4)", "$-24(R4)"},
{"$0", "$0"},
{"$0(R1)", "$(R1)"},
{"$0.5", "$(0.5)"},
{"$0x7000", "$28672"},
{"$0x88888eef", "$2290650863"},
{"$1", "$1"},
{"$_main<>(SB)", "$_main<>(SB)"},
{"$argframe(FP)", "$argframe(FP)"},
{"$~3", "$-4"},
{"(-288-3*8)(R1)", "-312(R1)"},
{"(16)(R7)", "16(R7)"},
{"(8)(g)", "8(g)"},
{"(R0)", "(R0)"},
{"(R3)", "(R3)"},
{"(R4)", "(R4)"},
{"(R5)", "(R5)"},
{"-1(R4)", "-1(R4)"},
{"-1(R5)", "-1(R5)"},
{"6(PC)", "6(PC)"},
{"F14", "F14"},
{"F15", "F15"},
{"F16", "F16"},
{"F17", "F17"},
{"F18", "F18"},
{"F19", "F19"},
{"F20", "F20"},
{"F21", "F21"},
{"F22", "F22"},
{"F23", "F23"},
{"F24", "F24"},
{"F25", "F25"},
{"F26", "F26"},
{"F27", "F27"},
{"F28", "F28"},
{"F29", "F29"},
{"F30", "F30"},
{"F31", "F31"},
{"R0", "R0"},
{"R1", "R1"},
{"R11", "R11"},
{"R12", "R12"},
{"R13", "R13"},
{"R14", "R14"},
{"R15", "R15"},
{"R16", "R16"},
{"R17", "R17"},
{"R18", "R18"},
{"R19", "R19"},
{"R2", "R2"},
{"R20", "R20"},
{"R21", "R21"},
{"R22", "R22"},
{"R23", "R23"},
{"R24", "R24"},
{"R25", "R25"},
{"R26", "R26"},
{"R27", "R27"},
{"R28", "R28"},
{"R29", "R29"},
{"R3", "R3"},
{"R31", "R31"},
{"R4", "R4"},
{"R5", "R5"},
{"R6", "R6"},
{"R7", "R7"},
{"R8", "R8"},
{"R9", "R9"},
{"LO", "LO"},
{"a(FP)", "a(FP)"},
{"g", "g"},
{"ret+8(FP)", "ret+8(FP)"},
{"runtime·abort(SB)", "runtime.abort(SB)"},
{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
{"·trunc(SB)", "\"\".trunc(SB)"},
{"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
}

View File

@ -0,0 +1,99 @@
8 00001 (testdata/mips64.s:8) TEXT foo(SB), 0, $0
18 00002 (testdata/mips64.s:18) MOVW R1, R2
19 00003 (testdata/mips64.s:19) MOVW LO, R1
20 00004 (testdata/mips64.s:20) MOVW HI, R1
21 00005 (testdata/mips64.s:21) MOVW R1, LO
22 00006 (testdata/mips64.s:22) MOVW R1, HI
23 00007 (testdata/mips64.s:23) MOVV R1, R2
24 00008 (testdata/mips64.s:24) MOVV LO, R1
25 00009 (testdata/mips64.s:25) MOVV HI, R1
26 00010 (testdata/mips64.s:26) MOVV R1, LO
27 00011 (testdata/mips64.s:27) MOVV R1, HI
33 00012 (testdata/mips64.s:33) MOVW foo<>+3(SB), R2
34 00013 (testdata/mips64.s:34) MOVW 16(R1), R2
35 00014 (testdata/mips64.s:35) MOVW (R1), R2
36 00015 (testdata/mips64.s:36) MOVV foo<>+3(SB), R2
37 00016 (testdata/mips64.s:37) MOVV 16(R1), R2
38 00017 (testdata/mips64.s:38) MOVV (R1), R2
44 00018 (testdata/mips64.s:44) MOVB R1, R2
50 00019 (testdata/mips64.s:50) MOVB foo<>+3(SB), R2
51 00020 (testdata/mips64.s:51) MOVB 16(R1), R2
52 00021 (testdata/mips64.s:52) MOVB (R1), R2
61 00022 (testdata/mips64.s:61) MOVD foo<>+3(SB), F2
62 00023 (testdata/mips64.s:62) MOVD 16(R1), F2
63 00024 (testdata/mips64.s:63) MOVD (R1), F2
69 00025 (testdata/mips64.s:69) MOVD $(0.10000000000000001), F2
75 00026 (testdata/mips64.s:75) MOVD F1, F2
81 00027 (testdata/mips64.s:81) MOVD F2, foo<>+3(SB)
82 00028 (testdata/mips64.s:82) MOVD F2, 16(R1)
83 00029 (testdata/mips64.s:83) MOVD F2, (R1)
92 00030 (testdata/mips64.s:92) MOVW R1, foo<>+3(SB)
93 00031 (testdata/mips64.s:93) MOVW R1, 16(R2)
94 00032 (testdata/mips64.s:94) MOVW R1, (R2)
95 00033 (testdata/mips64.s:95) MOVV R1, foo<>+3(SB)
96 00034 (testdata/mips64.s:96) MOVV R1, 16(R2)
97 00035 (testdata/mips64.s:97) MOVV R1, (R2)
103 00036 (testdata/mips64.s:103) MOVB R1, foo<>+3(SB)
104 00037 (testdata/mips64.s:104) MOVB R1, 16(R2)
105 00038 (testdata/mips64.s:105) MOVB R1, (R2)
114 00039 (testdata/mips64.s:114) MOVD F1, foo<>+3(SB)
115 00040 (testdata/mips64.s:115) MOVD F1, 16(R2)
116 00041 (testdata/mips64.s:116) MOVD F1, (R2)
125 00042 (testdata/mips64.s:125) MOVW FCR0, R1
131 00043 (testdata/mips64.s:131) MOVW R1, FCR0
137 00044 (testdata/mips64.s:137) MOVW R1, M1
138 00045 (testdata/mips64.s:138) MOVV R1, M1
144 00046 (testdata/mips64.s:144) MOVW M1, R1
145 00047 (testdata/mips64.s:145) MOVV M1, R1
158 00048 (testdata/mips64.s:158) ADD R1, R2, R3
164 00049 (testdata/mips64.s:164) ADD $1, R2, R3
170 00050 (testdata/mips64.s:170) ADD R1, R2
176 00051 (testdata/mips64.s:176) ADD $4, R1
182 00052 (testdata/mips64.s:182) MUL R1, R2
188 00053 (testdata/mips64.s:188) SLL R1, R2, R3
194 00054 (testdata/mips64.s:194) SLL R1, R2
200 00055 (testdata/mips64.s:200) SLL $4, R1, R2
206 00056 (testdata/mips64.s:206) SLL $4, R1
215 00057 (testdata/mips64.s:215) MOVW $1, R1
216 00058 (testdata/mips64.s:216) MOVV $1, R1
222 00059 (testdata/mips64.s:222) MOVW $1, R1
223 00060 (testdata/mips64.s:223) MOVW $foo(SB), R1
224 00061 (testdata/mips64.s:224) MOVV $1, R1
225 00062 (testdata/mips64.s:225) MOVV $foo(SB), R1
236 00063 (testdata/mips64.s:236) JMP 64(PC)
237 00064 (testdata/mips64.s:237) JMP 63
238 00065 (testdata/mips64.s:238) CALL 66(PC)
239 00066 (testdata/mips64.s:239) CALL 63
245 00067 (testdata/mips64.s:245) JMP 4(R1)
246 00068 (testdata/mips64.s:246) JMP foo(SB)
247 00069 (testdata/mips64.s:247) CALL 4(R1)
248 00070 (testdata/mips64.s:248) CALL foo(SB)
258 00071 (testdata/mips64.s:258) BEQ R1, 72(PC)
259 00072 (testdata/mips64.s:259) BEQ R1, 71
266 00073 (testdata/mips64.s:266) BEQ R1, R2, 74(PC)
267 00074 (testdata/mips64.s:267) BEQ R1, R2, 73
277 00075 (testdata/mips64.s:277) BLTZ R1, 76(PC)
278 00076 (testdata/mips64.s:278) BLTZ R1, 75
285 00077 (testdata/mips64.s:285) BFPT 78(PC)
286 00078 (testdata/mips64.s:286) BFPT 77
296 00079 (testdata/mips64.s:296) ABSD F1, F2
302 00080 (testdata/mips64.s:302) ADDD F1, F2
308 00081 (testdata/mips64.s:308) ADDD F1, F2, F3
314 00082 (testdata/mips64.s:314) CMPEQD F1, F2
320 00083 (testdata/mips64.s:320) WORD $1
321 00084 (testdata/mips64.s:321) WORD $foo(SB)
330 00085 (testdata/mips64.s:330) NOP
336 00086 (testdata/mips64.s:336) NOP R2
342 00087 (testdata/mips64.s:342) NOP F2
348 00088 (testdata/mips64.s:348) NOP R2
354 00089 (testdata/mips64.s:354) NOP F2
360 00090 (testdata/mips64.s:360) NOP $4
365 00091 (testdata/mips64.s:365) SYSCALL
366 00092 (testdata/mips64.s:366) BREAK
367 00093 (testdata/mips64.s:367) BREAK $1, (R1)
376 00094 (testdata/mips64.s:376) SYSCALL
377 00095 (testdata/mips64.s:377) RET
382 00096 (testdata/mips64.s:382) CALL foo(SB)
383 00097 (testdata/mips64.s:383) JMP foo(SB)
384 00098 (testdata/mips64.s:384) CALL foo(SB)
392 00099 (testdata/mips64.s:392) END

View File

@ -0,0 +1,392 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This input was created by taking the ppc64 testcase and modifying it
// by hand.
TEXT foo(SB),0,$0
//inst:
//
// load ints and bytes
//
// LMOVW rreg ',' rreg
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVW R1, R2
MOVW LO, R1
MOVW HI, R1
MOVW R1, LO
MOVW R1, HI
MOVV R1, R2
MOVV LO, R1
MOVV HI, R1
MOVV R1, LO
MOVV R1, HI
// LMOVW addr ',' rreg
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVW foo<>+3(SB), R2
MOVW 16(R1), R2
MOVW (R1), R2
MOVV foo<>+3(SB), R2
MOVV 16(R1), R2
MOVV (R1), R2
// LMOVB rreg ',' rreg
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVB R1, R2
// LMOVB addr ',' rreg
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVB foo<>+3(SB), R2
MOVB 16(R1), R2
MOVB (R1), R2
//
// load floats
//
// LFMOV addr ',' freg
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVD foo<>+3(SB), F2
MOVD 16(R1), F2
MOVD (R1), F2
// LFMOV fimm ',' freg
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVD $0.1, F2
// LFMOV freg ',' freg
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVD F1, F2
// LFMOV freg ',' addr
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVD F2, foo<>+3(SB)
MOVD F2, 16(R1)
MOVD F2, (R1)
//
// store ints and bytes
//
// LMOVW rreg ',' addr
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVW R1, foo<>+3(SB)
MOVW R1, 16(R2)
MOVW R1, (R2)
MOVV R1, foo<>+3(SB)
MOVV R1, 16(R2)
MOVV R1, (R2)
// LMOVB rreg ',' addr
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVB R1, foo<>+3(SB)
MOVB R1, 16(R2)
MOVB R1, (R2)
//
// store floats
//
// LMOVW freg ',' addr
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVD F1, foo<>+3(SB)
MOVD F1, 16(R2)
MOVD F1, (R2)
//
// floating point status
//
// LMOVW fpscr ',' freg
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVW FCR0, R1
// LMOVW freg ',' fpscr
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVW R1, FCR0
// LMOVW rreg ',' mreg
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVW R1, M1
MOVV R1, M1
// LMOVW mreg ',' rreg
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVW M1, R1
MOVV M1, R1
//
// integer operations
// logical instructions
// shift instructions
// unary instructions
//
// LADDW rreg ',' sreg ',' rreg
// {
// outcode(int($1), &$2, int($4), &$6);
// }
ADD R1, R2, R3
// LADDW imm ',' sreg ',' rreg
// {
// outcode(int($1), &$2, int($4), &$6);
// }
ADD $1, R2, R3
// LADDW rreg ',' rreg
// {
// outcode(int($1), &$2, 0, &$4);
// }
ADD R1, R2
// LADDW imm ',' rreg
// {
// outcode(int($1), &$2, 0, &$4);
// }
ADD $4, R1
// LMUL rreg ',' rreg
// {
// outcode(int($1), &$2, 0, &$4);
// }
MUL R1, R2
// LSHW rreg ',' sreg ',' rreg
// {
// outcode(int($1), &$2, int($4), &$6);
// }
SLL R1, R2, R3
// LSHW rreg ',' rreg
// {
// outcode(int($1), &$2, 0, &$4);
// }
SLL R1, R2
// LSHW imm ',' sreg ',' rreg
// {
// outcode(int($1), &$2, int($4), &$6);
// }
SLL $4, R1, R2
// LSHW imm ',' rreg
// {
// outcode(int($1), &$2, 0, &$4);
// }
SLL $4, R1
//
// move immediate: macro for lui+or, addi, addis, and other combinations
//
// LMOVW imm ',' rreg
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVW $1, R1
MOVV $1, R1
// LMOVW ximm ',' rreg
// {
// outcode(int($1), &$2, 0, &$4);
// }
MOVW $1, R1
MOVW $foo(SB), R1
MOVV $1, R1
MOVV $foo(SB), R1
//
// branch
//
// LBRA rel
// {
// outcode(int($1), &nullgen, 0, &$2);
// }
label0:
JMP 1(PC)
JMP label0+0
JAL 1(PC)
JAL label0+0
// LBRA addr
// {
// outcode(int($1), &nullgen, 0, &$2);
// }
JMP 4(R1)
JMP foo+0(SB)
JAL 4(R1)
JAL foo+0(SB)
//
// BEQ/BNE
//
// LBRA rreg ',' rel
// {
// outcode(int($1), &$2, 0, &$4);
// }
label1:
BEQ R1, 1(PC)
BEQ R1, label1
// LBRA rreg ',' sreg ',' rel
// {
// outcode(int($1), &$2, 0, &$4);
// }
label2:
BEQ R1, R2, 1(PC)
BEQ R1, R2, label2
//
// other integer conditional branch
//
// LBRA rreg ',' rel
// {
// outcode(int($1), &$2, 0, &$4);
// }
label3:
BLTZ R1, 1(PC)
BLTZ R1, label3
//
// floating point conditional branch
//
// LBRA rel
label4:
BFPT 1(PC)
BFPT label4
//
// floating point operate
//
// LFCONV freg ',' freg
// {
// outcode(int($1), &$2, 0, &$4);
// }
ABSD F1, F2
// LFADD freg ',' freg
// {
// outcode(int($1), &$2, 0, &$4);
// }
ADDD F1, F2
// LFADD freg ',' freg ',' freg
// {
// outcode(int($1), &$2, int($4.Reg), &$6);
// }
ADDD F1, F2, F3
// LFCMP freg ',' freg
// {
// outcode(int($1), &$2, 0, &$4);
// }
CMPEQD F1, F2
//
// WORD
//
WORD $1
WORD $foo(SB)
//
// NOP
//
// LNOP comma // asm doesn't support the trailing comma.
// {
// outcode(int($1), &nullgen, 0, &nullgen);
// }
NOP
// LNOP rreg comma // asm doesn't support the trailing comma.
// {
// outcode(int($1), &$2, 0, &nullgen);
// }
NOP R2
// LNOP freg comma // asm doesn't support the trailing comma.
// {
// outcode(int($1), &$2, 0, &nullgen);
// }
NOP F2
// LNOP ',' rreg // asm doesn't support the leading comma.
// {
// outcode(int($1), &nullgen, 0, &$3);
// }
NOP R2
// LNOP ',' freg // asm doesn't support the leading comma.
// {
// outcode(int($1), &nullgen, 0, &$3);
// }
NOP F2
// LNOP imm
// {
// outcode(int($1), &$2, 0, &nullgen);
// }
NOP $4
//
// special
//
SYSCALL
BREAK
BREAK $1, (R1) // overloaded CACHE opcode
//
// RET
//
// LRETRN comma // asm doesn't support the trailing comma.
// {
// outcode(int($1), &nullgen, 0, &nullgen);
// }
SYSCALL
RET
// More JMP/JAL cases, and canonical names JMP, CALL.
JAL foo(SB)
JMP foo(SB)
CALL foo(SB)
// END
//
// LEND comma // asm doesn't support the trailing comma.
// {
// outcode(int($1), &nullgen, 0, &nullgen);
// }
END

View File

@ -261,7 +261,7 @@ func (in *Input) macroDefinition(name string) ([]string, []Token) {
// Scan to newline. Backslashes escape newlines.
for tok != '\n' {
if tok == scanner.EOF {
in.Error("missing newline in macro definition for %q\n", name)
in.Error("missing newline in definition for macro:", name)
}
if tok == '\\' {
tok = in.Stack.Next()

View File

@ -124,7 +124,7 @@ func (f *File) ReadGo(name string) {
if f.Ref == nil {
f.Ref = make([]*Ref, 0, 8)
}
f.walk(ast2, "prog", (*File).saveRef)
f.walk(ast2, "prog", (*File).saveExprs)
// Accumulate exported functions.
// The comments are only on ast1 but we need to
@ -163,52 +163,72 @@ func commentText(g *ast.CommentGroup) string {
return strings.Join(pieces, "")
}
// Save various references we are going to need later.
func (f *File) saveExprs(x interface{}, context string) {
switch x := x.(type) {
case *ast.Expr:
switch (*x).(type) {
case *ast.SelectorExpr:
f.saveRef(x, context)
}
case *ast.CallExpr:
f.saveCall(x)
}
}
// Save references to C.xxx for later processing.
func (f *File) saveRef(x interface{}, context string) {
n, ok := x.(*ast.Expr)
func (f *File) saveRef(n *ast.Expr, context string) {
sel := (*n).(*ast.SelectorExpr)
// For now, assume that the only instance of capital C is when
// used as the imported package identifier.
// The parser should take care of scoping in the future, so
// that we will be able to distinguish a "top-level C" from a
// local C.
if l, ok := sel.X.(*ast.Ident); !ok || l.Name != "C" {
return
}
if context == "as2" {
context = "expr"
}
if context == "embed-type" {
error_(sel.Pos(), "cannot embed C type")
}
goname := sel.Sel.Name
if goname == "errno" {
error_(sel.Pos(), "cannot refer to errno directly; see documentation")
return
}
if goname == "_CMalloc" {
error_(sel.Pos(), "cannot refer to C._CMalloc; use C.malloc")
return
}
if goname == "malloc" {
goname = "_CMalloc"
}
name := f.Name[goname]
if name == nil {
name = &Name{
Go: goname,
}
f.Name[goname] = name
}
f.Ref = append(f.Ref, &Ref{
Name: name,
Expr: n,
Context: context,
})
}
// Save calls to C.xxx for later processing.
func (f *File) saveCall(call *ast.CallExpr) {
sel, ok := call.Fun.(*ast.SelectorExpr)
if !ok {
return
}
if sel, ok := (*n).(*ast.SelectorExpr); ok {
// For now, assume that the only instance of capital C is
// when used as the imported package identifier.
// The parser should take care of scoping in the future,
// so that we will be able to distinguish a "top-level C"
// from a local C.
if l, ok := sel.X.(*ast.Ident); ok && l.Name == "C" {
if context == "as2" {
context = "expr"
}
if context == "embed-type" {
error_(sel.Pos(), "cannot embed C type")
}
goname := sel.Sel.Name
if goname == "errno" {
error_(sel.Pos(), "cannot refer to errno directly; see documentation")
return
}
if goname == "_CMalloc" {
error_(sel.Pos(), "cannot refer to C._CMalloc; use C.malloc")
return
}
if goname == "malloc" {
goname = "_CMalloc"
}
name := f.Name[goname]
if name == nil {
name = &Name{
Go: goname,
}
f.Name[goname] = name
}
f.Ref = append(f.Ref, &Ref{
Name: name,
Expr: n,
Context: context,
})
return
}
if l, ok := sel.X.(*ast.Ident); !ok || l.Name != "C" {
return
}
f.Calls = append(f.Calls, call)
}
// If a function should be exported add it to ExpFunc.

View File

@ -119,6 +119,7 @@ C.short, C.ushort (unsigned short), C.int, C.uint (unsigned int),
C.long, C.ulong (unsigned long), C.longlong (long long),
C.ulonglong (unsigned long long), C.float, C.double.
The C type void* is represented by Go's unsafe.Pointer.
The C types __int128_t and __uint128_t are represented by [16]byte.
To access a struct, union, or enum type directly, prefix it with
struct_, union_, or enum_, as in C.struct_stat.
@ -188,10 +189,10 @@ by making copies of the data. In pseudo-Go definitions:
// C string to Go string
func C.GoString(*C.char) string
// C string, length to Go string
// C data with explicit length to Go string
func C.GoStringN(*C.char, C.int) string
// C pointer, length to Go []byte
// C data with explicit length to Go []byte
func C.GoBytes(unsafe.Pointer, C.int) []byte
C references to Go
@ -221,6 +222,51 @@ definitions and declarations, then the two output files will produce
duplicate symbols and the linker will fail. To avoid this, definitions
must be placed in preambles in other files, or in C source files.
Passing pointers
Go is a garbage collected language, and the garbage collector needs to
know the location of every pointer to Go memory. Because of this,
there are restrictions on passing pointers between Go and C.
In this section the term Go pointer means a pointer to memory
allocated by Go (such as by using the & operator or calling the
predefined new function) and the term C pointer means a pointer to
memory allocated by C (such as by a call to C.malloc). Whether a
pointer is a Go pointer or a C pointer is a dynamic property
determined by how the memory was allocated; it has nothing to do with
the type of the pointer.
Go code may pass a Go pointer to C provided the Go memory to which it
points does not contain any Go pointers. The C code must preserve
this property: it must not store any Go pointers into Go memory, even
temporarily. When passing a pointer to a field in a struct, the Go
memory in question is the memory occupied by the field, not the entire
struct. When passing a pointer to an element in an array or slice,
the Go memory in question is the entire array or the entire backing
array of the slice.
C code may not keep a copy of a Go pointer after the call returns.
If Go code passes a Go pointer to a C function, the C function must
return. There is no specific time limit, but a C function that simply
blocks holding a Go pointer while other goroutines are running may
eventually cause the program to run out of memory and fail (because
the garbage collector may not be able to make progress).
A Go function called by C code may not return a Go pointer. A Go
function called by C code may take C pointers as arguments, and it may
store non-pointer or C pointer data through those pointers, but it may
not store a Go pointer into memory pointed to by a C pointer. A Go
function called by C code may take a Go pointer as an argument, but it
must preserve the property that the Go memory to which it points does
not contain any Go pointers.
These rules are partially enforced by cgo by default. It is possible
to defeat this enforcement by using the unsafe package, and of course
there is nothing stopping the C code from doing anything it likes.
However, programs that break these rules are likely to fail in
unexpected and unpredictable ways.
Using cgo directly
Usage:

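A minimal sketch of the rules in the new "Passing pointers" section above: the slice below may be handed to C because its backing array holds no Go pointers and the C side keeps no copy after the call. The C function sum is made up for this illustration.

package main

/*
#include <stdint.h>
// sum only reads the Go memory during the call and stores no copy of p.
static int64_t sum(const int32_t *p, int n) {
	int64_t s = 0;
	int i;
	for (i = 0; i < n; i++) {
		s += p[i];
	}
	return s;
}
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func main() {
	// Legal: []int32 contains no Go pointers, so passing the address of
	// its first element satisfies the rules above.
	a := []int32{1, 2, 3, 4}
	fmt.Println(C.sum((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a))))

	// Not legal: passing the address of a value that itself contains Go
	// pointers, or having the C side keep p for use after sum returns.
}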
View File

@ -167,6 +167,7 @@ func (p *Package) Translate(f *File) {
if len(needType) > 0 {
p.loadDWARF(f, needType)
}
p.rewriteCalls(f)
p.rewriteRef(f)
}
@ -570,6 +571,278 @@ func (p *Package) mangleName(n *Name) {
n.Mangle = prefix + n.Kind + "_" + n.Go
}
// rewriteCalls rewrites all calls that pass pointers to check that
// they follow the rules for passing pointers between Go and C.
func (p *Package) rewriteCalls(f *File) {
for _, call := range f.Calls {
// This is a call to C.xxx; set goname to "xxx".
goname := call.Fun.(*ast.SelectorExpr).Sel.Name
if goname == "malloc" {
continue
}
name := f.Name[goname]
if name.Kind != "func" {
// Probably a type conversion.
continue
}
p.rewriteCall(f, call, name)
}
}
// rewriteCall rewrites one call to add pointer checks. We replace
// each pointer argument x with _cgoCheckPointer(x).(T).
func (p *Package) rewriteCall(f *File, call *ast.CallExpr, name *Name) {
for i, param := range name.FuncType.Params {
// An untyped nil does not need a pointer check, and
// when _cgoCheckPointer returns the untyped nil the
// type assertion we are going to insert will fail.
// Easier to just skip nil arguments.
// TODO: Note that this fails if nil is shadowed.
if id, ok := call.Args[i].(*ast.Ident); ok && id.Name == "nil" {
continue
}
if !p.needsPointerCheck(f, param.Go) {
continue
}
if len(call.Args) <= i {
// Avoid a crash; this will be caught when the
// generated file is compiled.
return
}
c := &ast.CallExpr{
Fun: ast.NewIdent("_cgoCheckPointer"),
Args: []ast.Expr{
call.Args[i],
},
}
// Add optional additional arguments for an address
// expression.
if u, ok := call.Args[i].(*ast.UnaryExpr); ok && u.Op == token.AND {
c.Args = p.checkAddrArgs(f, c.Args, u.X)
}
// _cgoCheckPointer returns interface{}.
// We need to type assert that to the type we want.
// If the Go version of this C type uses
// unsafe.Pointer, we can't use a type assertion,
// because the Go file might not import unsafe.
// Instead we use a local variant of _cgoCheckPointer.
var arg ast.Expr
if n := p.unsafeCheckPointerName(param.Go); n != "" {
c.Fun = ast.NewIdent(n)
arg = c
} else {
// In order for the type assertion to succeed,
// we need it to match the actual type of the
// argument. The only type we have is the
// type of the function parameter. We know
// that the argument type must be assignable
// to the function parameter type, or the code
// would not compile, but there is nothing
// requiring that the types be exactly the
// same. Add a type conversion to the
// argument so that the type assertion will
// succeed.
c.Args[0] = &ast.CallExpr{
Fun: param.Go,
Args: []ast.Expr{
c.Args[0],
},
}
arg = &ast.TypeAssertExpr{
X: c,
Type: param.Go,
}
}
call.Args[i] = arg
}
}
// needsPointerCheck returns whether the type t needs a pointer check.
// This is true if t is a pointer and if the value to which it points
// might contain a pointer.
func (p *Package) needsPointerCheck(f *File, t ast.Expr) bool {
return p.hasPointer(f, t, true)
}
// hasPointer is used by needsPointerCheck. If top is true it returns
// whether t is or contains a pointer that might point to a pointer.
// If top is false it returns whether t is or contains a pointer.
func (p *Package) hasPointer(f *File, t ast.Expr, top bool) bool {
switch t := t.(type) {
case *ast.ArrayType:
if t.Len == nil {
if !top {
return true
}
return p.hasPointer(f, t.Elt, false)
}
return p.hasPointer(f, t.Elt, top)
case *ast.StructType:
for _, field := range t.Fields.List {
if p.hasPointer(f, field.Type, top) {
return true
}
}
return false
case *ast.StarExpr: // Pointer type.
if !top {
return true
}
return p.hasPointer(f, t.X, false)
case *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
return true
case *ast.Ident:
// TODO: Handle types defined within function.
for _, d := range p.Decl {
gd, ok := d.(*ast.GenDecl)
if !ok || gd.Tok != token.TYPE {
continue
}
for _, spec := range gd.Specs {
ts, ok := spec.(*ast.TypeSpec)
if !ok {
continue
}
if ts.Name.Name == t.Name {
return p.hasPointer(f, ts.Type, top)
}
}
}
if def := typedef[t.Name]; def != nil {
return p.hasPointer(f, def.Go, top)
}
if t.Name == "string" {
return !top
}
if t.Name == "error" {
return true
}
if goTypes[t.Name] != nil {
return false
}
// We can't figure out the type. Conservative
// approach is to assume it has a pointer.
return true
case *ast.SelectorExpr:
if l, ok := t.X.(*ast.Ident); !ok || l.Name != "C" {
// Type defined in a different package.
// Conservative approach is to assume it has a
// pointer.
return true
}
name := f.Name[t.Sel.Name]
if name != nil && name.Kind == "type" && name.Type != nil && name.Type.Go != nil {
return p.hasPointer(f, name.Type.Go, top)
}
// We can't figure out the type. Conservative
// approach is to assume it has a pointer.
return true
default:
error_(t.Pos(), "could not understand type %s", gofmt(t))
return true
}
}
// checkAddrArgs tries to add arguments to the call of
// _cgoCheckPointer when the argument is an address expression. We
// pass true to mean that the argument is an address operation of
// something other than a slice index, which means that it's only
// necessary to check the specific element pointed to, not the entire
// object. This is for &s.f, where f is a field in a struct. We can
// pass a slice or array, meaning that we should check the entire
// slice or array but need not check any other part of the object.
// This is for &s.a[i], where we need to check all of a. However, we
// only pass the slice or array if we can refer to it without side
// effects.
func (p *Package) checkAddrArgs(f *File, args []ast.Expr, x ast.Expr) []ast.Expr {
index, ok := x.(*ast.IndexExpr)
if !ok {
// This is the address of something that is not an
// index expression. We only need to examine the
// single value to which it points.
// TODO: what if true is shadowed?
return append(args, ast.NewIdent("true"))
}
if !p.hasSideEffects(f, index.X) {
// Examine the entire slice.
return append(args, index.X)
}
// Treat the pointer as unknown.
return args
}
// hasSideEffects returns whether the expression x has any side
// effects. x is an expression, not a statement, so the only side
// effect is a function call.
func (p *Package) hasSideEffects(f *File, x ast.Expr) bool {
found := false
f.walk(x, "expr",
func(f *File, x interface{}, context string) {
switch x.(type) {
case *ast.CallExpr:
found = true
}
})
return found
}
// unsafeCheckPointerName is given the Go version of a C type. If the
// type uses unsafe.Pointer, we arrange to build a version of
// _cgoCheckPointer that returns that type. This avoids using a type
// assertion to unsafe.Pointer in our copy of user code. We return
// the name of the _cgoCheckPointer function we are going to build, or
// the empty string if the type does not use unsafe.Pointer.
func (p *Package) unsafeCheckPointerName(t ast.Expr) string {
if !p.hasUnsafePointer(t) {
return ""
}
var buf bytes.Buffer
conf.Fprint(&buf, fset, t)
s := buf.String()
for i, t := range p.CgoChecks {
if s == t {
return p.unsafeCheckPointerNameIndex(i)
}
}
p.CgoChecks = append(p.CgoChecks, s)
return p.unsafeCheckPointerNameIndex(len(p.CgoChecks) - 1)
}
// hasUnsafePointer returns whether the Go type t uses unsafe.Pointer.
// t is the Go version of a C type, so we don't need to handle every case.
// We only care about direct references, not references via typedefs.
func (p *Package) hasUnsafePointer(t ast.Expr) bool {
switch t := t.(type) {
case *ast.Ident:
return t.Name == "unsafe.Pointer"
case *ast.ArrayType:
return p.hasUnsafePointer(t.Elt)
case *ast.StructType:
for _, f := range t.Fields.List {
if p.hasUnsafePointer(f.Type) {
return true
}
}
case *ast.StarExpr: // Pointer type.
return p.hasUnsafePointer(t.X)
}
return false
}
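// Illustrative examples (unsafe.Pointer is matched as the single identifier
// "unsafe.Pointer" by the Ident case above):
//
//	unsafe.Pointer             -> true
//	*unsafe.Pointer            -> true
//	[4]unsafe.Pointer          -> true
//	struct{ p unsafe.Pointer } -> true
//	*int8                      -> false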
// unsafeCheckPointerNameIndex returns the name to use for a
// _cgoCheckPointer variant based on the index in the CgoChecks slice.
func (p *Package) unsafeCheckPointerNameIndex(i int) string {
return fmt.Sprintf("_cgoCheckPointer%d", i)
}
// rewriteRef rewrites all the C.xxx references in f.AST to refer to the
// Go equivalents, now that we have figured out the meaning of all
// the xxx. In *godefs mode, rewriteRef replaces the names
@ -1227,6 +1500,11 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
t.Go = c.int32
case 8:
t.Go = c.int64
case 16:
t.Go = &ast.ArrayType{
Len: c.intExpr(t.Size),
Elt: c.uint8,
}
}
if t.Align = t.Size; t.Align >= c.ptrSize {
t.Align = c.ptrSize
@ -1384,6 +1662,11 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
t.Go = c.uint32
case 8:
t.Go = c.uint64
case 16:
t.Go = &ast.ArrayType{
Len: c.intExpr(t.Size),
Elt: c.uint8,
}
}
if t.Align = t.Size; t.Align >= c.ptrSize {
t.Align = c.ptrSize

View File

@ -42,6 +42,7 @@ type Package struct {
GoFiles []string // list of Go files
GccFiles []string // list of gcc output files
Preamble string // collected preamble for _cgo_export.h
CgoChecks []string // see unsafeCheckPointerName
}
// A File collects information about a single Go input file.
@ -51,6 +52,7 @@ type File struct {
Package string // Package name
Preamble string // C preamble (doc comment on import "C")
Ref []*Ref // all references to C.xxx in AST
Calls []*ast.CallExpr // all calls to C.xxx in AST
ExpFunc []*ExpFunc // exported functions for this file
Name map[string]*Name // map from Go name to Name
}
@ -132,25 +134,29 @@ func usage() {
}
var ptrSizeMap = map[string]int64{
"386": 4,
"amd64": 8,
"arm": 4,
"arm64": 8,
"ppc64": 8,
"ppc64le": 8,
"s390": 4,
"s390x": 8,
"386": 4,
"amd64": 8,
"arm": 4,
"arm64": 8,
"mips64": 8,
"mips64le": 8,
"ppc64": 8,
"ppc64le": 8,
"s390": 4,
"s390x": 8,
}
var intSizeMap = map[string]int64{
"386": 4,
"amd64": 8,
"arm": 4,
"arm64": 8,
"ppc64": 8,
"ppc64le": 8,
"s390": 4,
"s390x": 4,
"386": 4,
"amd64": 8,
"arm": 4,
"arm64": 8,
"mips64": 8,
"mips64le": 8,
"ppc64": 8,
"ppc64le": 8,
"s390": 4,
"s390x": 4,
}
var cPrefix string

View File

@ -108,6 +108,13 @@ func (p *Package) writeDefs() {
fmt.Fprint(fgo2, goProlog)
}
for i, t := range p.CgoChecks {
n := p.unsafeCheckPointerNameIndex(i)
fmt.Fprintf(fgo2, "\nfunc %s(p interface{}, args ...interface{}) %s {\n", n, t)
fmt.Fprintf(fgo2, "\treturn _cgoCheckPointer(p, args...).(%s)\n", t)
fmt.Fprintf(fgo2, "}\n")
}
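// For example, with p.CgoChecks = []string{"unsafe.Pointer"} the loop above
// expands to:
//
//	func _cgoCheckPointer0(p interface{}, args ...interface{}) unsafe.Pointer {
//		return _cgoCheckPointer(p, args...).(unsafe.Pointer)
//	}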
gccgoSymbolPrefix := p.gccgoSymbolPrefix()
cVars := make(map[string]bool)
@ -814,7 +821,7 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
fmt.Fprintf(fgo2, "//go:cgo_export_static _cgoexp%s_%s\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "//go:nosplit\n") // no split stack, so no use of m or g
fmt.Fprintf(fgo2, "//go:norace\n") // must not have race detector calls inserted
fmt.Fprintf(fgo2, "func _cgoexp%s_%s(a unsafe.Pointer, n int32) {", cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "func _cgoexp%s_%s(a unsafe.Pointer, n int32) {\n", cPrefix, exp.ExpName)
fmt.Fprintf(fgo2, "\tfn := %s\n", goname)
// The indirect here is converting from a Go function pointer to a C function pointer.
fmt.Fprintf(fgo2, "\t_cgo_runtime_cgocallback(**(**unsafe.Pointer)(unsafe.Pointer(&fn)), a, uintptr(n));\n")
@ -1241,6 +1248,9 @@ func _cgo_runtime_cmalloc(uintptr) unsafe.Pointer
//go:linkname _cgo_runtime_cgocallback runtime.cgocallback
func _cgo_runtime_cgocallback(unsafe.Pointer, unsafe.Pointer, uintptr)
//go:linkname _cgoCheckPointer runtime.cgoCheckPointer
func _cgoCheckPointer(interface{}, ...interface{}) interface{}
`
const goStringDef = `

View File

@ -66,6 +66,8 @@ Flags:
Write memory profile for the compilation to file.
-memprofilerate rate
Set runtime.MemProfileRate for the compilation to rate.
-msan
Insert calls to C/C++ memory sanitizer.
-nolocalimports
Disallow local (relative) imports.
-o file
@ -77,6 +79,8 @@ Flags:
Write a package (archive) file rather than an object file
-race
Compile with race detector enabled.
-trimpath prefix
Remove prefix from recorded source file paths.
-u
Disallow importing packages not marked as safe; implies -nolocalimports.
@ -107,7 +111,7 @@ The //line directive is an historical special case; all other directives are of
The //go:noescape directive specifies that the next declaration in the file, which
must be a func without a body (meaning that it has an implementation not written
in Go) does not allow any of the pointers passed as arguments to escape into the
heap or into the values returned from the function. This information can be used as
heap or into the values returned from the function. This information can be used
during the compiler's escape analysis of Go code calling the function.
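For example, a declaration of this form (illustrative; the runtime declares
several assembly-implemented functions this way) promises that neither
pointer argument escapes:
	//go:noescape
	func memmove(to, from unsafe.Pointer, n uintptr)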
//go:nosplit

View File

@ -192,7 +192,7 @@ var panicdiv *gc.Node
* res = nl % nr
* according to op.
*/
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// Have to be careful about handling
// most negative int divided by -1 correctly.
// The hardware will trap.
@ -335,7 +335,8 @@ func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
x.Type = gc.Types[gc.TINT64]
gmove(x, oldx)
x.Type = t
oldx.Etype = r // squirrel away old r value
// TODO(marvin): Fix Node.EType type union.
oldx.Etype = gc.EType(r) // squirrel away old r value
gc.SetReg(dr, 1)
}
}
@ -389,7 +390,7 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
* res = nl << nr
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
@ -508,7 +509,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
* there is no 2-operand byte multiply instruction so
* we do a full-width multiplication and truncate afterwards.
*/
func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
func cgen_bmul(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
if optoas(op, nl.Type) != x86.AIMULB {
return false
}

View File

@ -100,7 +100,7 @@ func ginscon(as int, c int64, n2 *gc.Node) {
gins(as, &n1, n2)
}
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
// Reverse comparison to place constant last.
op = gc.Brrev(op)
@ -108,7 +108,15 @@ func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
}
// General case.
var r1, r2, g1, g2 gc.Node
if n1.Op == gc.ONAME && n1.Class&gc.PHEAP == 0 || n1.Op == gc.OINDREG {
// A special case to make write barriers more efficient.
// Comparing the first field of a named struct can be done directly.
base := n1
if n1.Op == gc.ODOT && n1.Left.Type.Etype == gc.TSTRUCT && n1.Left.Type.Type.Sym == n1.Right.Sym {
base = n1.Left
}
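// When f is the first field of a struct variable x, the expression x.f is
// at offset 0, so &x.f == &x and the named variable itself can serve as
// the comparison operand below without allocating a temporary register.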
if base.Op == gc.ONAME && base.Class&gc.PHEAP == 0 || n1.Op == gc.OINDREG {
r1 = *n1
} else {
gc.Regalloc(&r1, t, n1)
@ -673,514 +681,547 @@ func ginsnop() {
/*
* return Axxx for Oxxx on type t.
*/
func optoas(op int, t *gc.Type) int {
func optoas(op gc.Op, t *gc.Type) int {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
// avoid constant conversions in switches below
const (
OMINUS_ = uint32(gc.OMINUS) << 16
OLSH_ = uint32(gc.OLSH) << 16
ORSH_ = uint32(gc.ORSH) << 16
OADD_ = uint32(gc.OADD) << 16
OSUB_ = uint32(gc.OSUB) << 16
OMUL_ = uint32(gc.OMUL) << 16
ODIV_ = uint32(gc.ODIV) << 16
OMOD_ = uint32(gc.OMOD) << 16
OOR_ = uint32(gc.OOR) << 16
OAND_ = uint32(gc.OAND) << 16
OXOR_ = uint32(gc.OXOR) << 16
OEQ_ = uint32(gc.OEQ) << 16
ONE_ = uint32(gc.ONE) << 16
OLT_ = uint32(gc.OLT) << 16
OLE_ = uint32(gc.OLE) << 16
OGE_ = uint32(gc.OGE) << 16
OGT_ = uint32(gc.OGT) << 16
OCMP_ = uint32(gc.OCMP) << 16
OPS_ = uint32(gc.OPS) << 16
OPC_ = uint32(gc.OPC) << 16
OAS_ = uint32(gc.OAS) << 16
OHMUL_ = uint32(gc.OHMUL) << 16
OSQRT_ = uint32(gc.OSQRT) << 16
OADDR_ = uint32(gc.OADDR) << 16
OINC_ = uint32(gc.OINC) << 16
ODEC_ = uint32(gc.ODEC) << 16
OLROT_ = uint32(gc.OLROT) << 16
ORROTC_ = uint32(gc.ORROTC) << 16
OEXTEND_ = uint32(gc.OEXTEND) << 16
)
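// Each constant above packs an Op into the high 16 bits; a case label such
// as OADD_ | gc.TINT32 therefore matches the switch key below exactly when
// op is gc.OADD and Simtype[t.Etype] is gc.TINT32, without repeating the
// uint32 conversion in every case label.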
a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)
case gc.OADDR<<16 | gc.TPTR32:
case OADDR_ | gc.TPTR32:
a = x86.ALEAL
case gc.OADDR<<16 | gc.TPTR64:
case OADDR_ | gc.TPTR64:
a = x86.ALEAQ
case gc.OEQ<<16 | gc.TBOOL,
gc.OEQ<<16 | gc.TINT8,
gc.OEQ<<16 | gc.TUINT8,
gc.OEQ<<16 | gc.TINT16,
gc.OEQ<<16 | gc.TUINT16,
gc.OEQ<<16 | gc.TINT32,
gc.OEQ<<16 | gc.TUINT32,
gc.OEQ<<16 | gc.TINT64,
gc.OEQ<<16 | gc.TUINT64,
gc.OEQ<<16 | gc.TPTR32,
gc.OEQ<<16 | gc.TPTR64,
gc.OEQ<<16 | gc.TFLOAT32,
gc.OEQ<<16 | gc.TFLOAT64:
case OEQ_ | gc.TBOOL,
OEQ_ | gc.TINT8,
OEQ_ | gc.TUINT8,
OEQ_ | gc.TINT16,
OEQ_ | gc.TUINT16,
OEQ_ | gc.TINT32,
OEQ_ | gc.TUINT32,
OEQ_ | gc.TINT64,
OEQ_ | gc.TUINT64,
OEQ_ | gc.TPTR32,
OEQ_ | gc.TPTR64,
OEQ_ | gc.TFLOAT32,
OEQ_ | gc.TFLOAT64:
a = x86.AJEQ
case gc.ONE<<16 | gc.TBOOL,
gc.ONE<<16 | gc.TINT8,
gc.ONE<<16 | gc.TUINT8,
gc.ONE<<16 | gc.TINT16,
gc.ONE<<16 | gc.TUINT16,
gc.ONE<<16 | gc.TINT32,
gc.ONE<<16 | gc.TUINT32,
gc.ONE<<16 | gc.TINT64,
gc.ONE<<16 | gc.TUINT64,
gc.ONE<<16 | gc.TPTR32,
gc.ONE<<16 | gc.TPTR64,
gc.ONE<<16 | gc.TFLOAT32,
gc.ONE<<16 | gc.TFLOAT64:
case ONE_ | gc.TBOOL,
ONE_ | gc.TINT8,
ONE_ | gc.TUINT8,
ONE_ | gc.TINT16,
ONE_ | gc.TUINT16,
ONE_ | gc.TINT32,
ONE_ | gc.TUINT32,
ONE_ | gc.TINT64,
ONE_ | gc.TUINT64,
ONE_ | gc.TPTR32,
ONE_ | gc.TPTR64,
ONE_ | gc.TFLOAT32,
ONE_ | gc.TFLOAT64:
a = x86.AJNE
case gc.OPS<<16 | gc.TBOOL,
gc.OPS<<16 | gc.TINT8,
gc.OPS<<16 | gc.TUINT8,
gc.OPS<<16 | gc.TINT16,
gc.OPS<<16 | gc.TUINT16,
gc.OPS<<16 | gc.TINT32,
gc.OPS<<16 | gc.TUINT32,
gc.OPS<<16 | gc.TINT64,
gc.OPS<<16 | gc.TUINT64,
gc.OPS<<16 | gc.TPTR32,
gc.OPS<<16 | gc.TPTR64,
gc.OPS<<16 | gc.TFLOAT32,
gc.OPS<<16 | gc.TFLOAT64:
case OPS_ | gc.TBOOL,
OPS_ | gc.TINT8,
OPS_ | gc.TUINT8,
OPS_ | gc.TINT16,
OPS_ | gc.TUINT16,
OPS_ | gc.TINT32,
OPS_ | gc.TUINT32,
OPS_ | gc.TINT64,
OPS_ | gc.TUINT64,
OPS_ | gc.TPTR32,
OPS_ | gc.TPTR64,
OPS_ | gc.TFLOAT32,
OPS_ | gc.TFLOAT64:
a = x86.AJPS
case gc.OPC<<16 | gc.TBOOL,
gc.OPC<<16 | gc.TINT8,
gc.OPC<<16 | gc.TUINT8,
gc.OPC<<16 | gc.TINT16,
gc.OPC<<16 | gc.TUINT16,
gc.OPC<<16 | gc.TINT32,
gc.OPC<<16 | gc.TUINT32,
gc.OPC<<16 | gc.TINT64,
gc.OPC<<16 | gc.TUINT64,
gc.OPC<<16 | gc.TPTR32,
gc.OPC<<16 | gc.TPTR64,
gc.OPC<<16 | gc.TFLOAT32,
gc.OPC<<16 | gc.TFLOAT64:
case OPC_ | gc.TBOOL,
OPC_ | gc.TINT8,
OPC_ | gc.TUINT8,
OPC_ | gc.TINT16,
OPC_ | gc.TUINT16,
OPC_ | gc.TINT32,
OPC_ | gc.TUINT32,
OPC_ | gc.TINT64,
OPC_ | gc.TUINT64,
OPC_ | gc.TPTR32,
OPC_ | gc.TPTR64,
OPC_ | gc.TFLOAT32,
OPC_ | gc.TFLOAT64:
a = x86.AJPC
case gc.OLT<<16 | gc.TINT8,
gc.OLT<<16 | gc.TINT16,
gc.OLT<<16 | gc.TINT32,
gc.OLT<<16 | gc.TINT64:
case OLT_ | gc.TINT8,
OLT_ | gc.TINT16,
OLT_ | gc.TINT32,
OLT_ | gc.TINT64:
a = x86.AJLT
case gc.OLT<<16 | gc.TUINT8,
gc.OLT<<16 | gc.TUINT16,
gc.OLT<<16 | gc.TUINT32,
gc.OLT<<16 | gc.TUINT64:
case OLT_ | gc.TUINT8,
OLT_ | gc.TUINT16,
OLT_ | gc.TUINT32,
OLT_ | gc.TUINT64:
a = x86.AJCS
case gc.OLE<<16 | gc.TINT8,
gc.OLE<<16 | gc.TINT16,
gc.OLE<<16 | gc.TINT32,
gc.OLE<<16 | gc.TINT64:
case OLE_ | gc.TINT8,
OLE_ | gc.TINT16,
OLE_ | gc.TINT32,
OLE_ | gc.TINT64:
a = x86.AJLE
case gc.OLE<<16 | gc.TUINT8,
gc.OLE<<16 | gc.TUINT16,
gc.OLE<<16 | gc.TUINT32,
gc.OLE<<16 | gc.TUINT64:
case OLE_ | gc.TUINT8,
OLE_ | gc.TUINT16,
OLE_ | gc.TUINT32,
OLE_ | gc.TUINT64:
a = x86.AJLS
case gc.OGT<<16 | gc.TINT8,
gc.OGT<<16 | gc.TINT16,
gc.OGT<<16 | gc.TINT32,
gc.OGT<<16 | gc.TINT64:
case OGT_ | gc.TINT8,
OGT_ | gc.TINT16,
OGT_ | gc.TINT32,
OGT_ | gc.TINT64:
a = x86.AJGT
case gc.OGT<<16 | gc.TUINT8,
gc.OGT<<16 | gc.TUINT16,
gc.OGT<<16 | gc.TUINT32,
gc.OGT<<16 | gc.TUINT64,
gc.OLT<<16 | gc.TFLOAT32,
gc.OLT<<16 | gc.TFLOAT64:
case OGT_ | gc.TUINT8,
OGT_ | gc.TUINT16,
OGT_ | gc.TUINT32,
OGT_ | gc.TUINT64,
OLT_ | gc.TFLOAT32,
OLT_ | gc.TFLOAT64:
a = x86.AJHI
case gc.OGE<<16 | gc.TINT8,
gc.OGE<<16 | gc.TINT16,
gc.OGE<<16 | gc.TINT32,
gc.OGE<<16 | gc.TINT64:
case OGE_ | gc.TINT8,
OGE_ | gc.TINT16,
OGE_ | gc.TINT32,
OGE_ | gc.TINT64:
a = x86.AJGE
case gc.OGE<<16 | gc.TUINT8,
gc.OGE<<16 | gc.TUINT16,
gc.OGE<<16 | gc.TUINT32,
gc.OGE<<16 | gc.TUINT64,
gc.OLE<<16 | gc.TFLOAT32,
gc.OLE<<16 | gc.TFLOAT64:
case OGE_ | gc.TUINT8,
OGE_ | gc.TUINT16,
OGE_ | gc.TUINT32,
OGE_ | gc.TUINT64,
OLE_ | gc.TFLOAT32,
OLE_ | gc.TFLOAT64:
a = x86.AJCC
case gc.OCMP<<16 | gc.TBOOL,
gc.OCMP<<16 | gc.TINT8,
gc.OCMP<<16 | gc.TUINT8:
case OCMP_ | gc.TBOOL,
OCMP_ | gc.TINT8,
OCMP_ | gc.TUINT8:
a = x86.ACMPB
case gc.OCMP<<16 | gc.TINT16,
gc.OCMP<<16 | gc.TUINT16:
case OCMP_ | gc.TINT16,
OCMP_ | gc.TUINT16:
a = x86.ACMPW
case gc.OCMP<<16 | gc.TINT32,
gc.OCMP<<16 | gc.TUINT32,
gc.OCMP<<16 | gc.TPTR32:
case OCMP_ | gc.TINT32,
OCMP_ | gc.TUINT32,
OCMP_ | gc.TPTR32:
a = x86.ACMPL
case gc.OCMP<<16 | gc.TINT64,
gc.OCMP<<16 | gc.TUINT64,
gc.OCMP<<16 | gc.TPTR64:
case OCMP_ | gc.TINT64,
OCMP_ | gc.TUINT64,
OCMP_ | gc.TPTR64:
a = x86.ACMPQ
case gc.OCMP<<16 | gc.TFLOAT32:
case OCMP_ | gc.TFLOAT32:
a = x86.AUCOMISS
case gc.OCMP<<16 | gc.TFLOAT64:
case OCMP_ | gc.TFLOAT64:
a = x86.AUCOMISD
case gc.OAS<<16 | gc.TBOOL,
gc.OAS<<16 | gc.TINT8,
gc.OAS<<16 | gc.TUINT8:
case OAS_ | gc.TBOOL,
OAS_ | gc.TINT8,
OAS_ | gc.TUINT8:
a = x86.AMOVB
case gc.OAS<<16 | gc.TINT16,
gc.OAS<<16 | gc.TUINT16:
case OAS_ | gc.TINT16,
OAS_ | gc.TUINT16:
a = x86.AMOVW
case gc.OAS<<16 | gc.TINT32,
gc.OAS<<16 | gc.TUINT32,
gc.OAS<<16 | gc.TPTR32:
case OAS_ | gc.TINT32,
OAS_ | gc.TUINT32,
OAS_ | gc.TPTR32:
a = x86.AMOVL
case gc.OAS<<16 | gc.TINT64,
gc.OAS<<16 | gc.TUINT64,
gc.OAS<<16 | gc.TPTR64:
case OAS_ | gc.TINT64,
OAS_ | gc.TUINT64,
OAS_ | gc.TPTR64:
a = x86.AMOVQ
case gc.OAS<<16 | gc.TFLOAT32:
case OAS_ | gc.TFLOAT32:
a = x86.AMOVSS
case gc.OAS<<16 | gc.TFLOAT64:
case OAS_ | gc.TFLOAT64:
a = x86.AMOVSD
case gc.OADD<<16 | gc.TINT8,
gc.OADD<<16 | gc.TUINT8:
case OADD_ | gc.TINT8,
OADD_ | gc.TUINT8:
a = x86.AADDB
case gc.OADD<<16 | gc.TINT16,
gc.OADD<<16 | gc.TUINT16:
case OADD_ | gc.TINT16,
OADD_ | gc.TUINT16:
a = x86.AADDW
case gc.OADD<<16 | gc.TINT32,
gc.OADD<<16 | gc.TUINT32,
gc.OADD<<16 | gc.TPTR32:
case OADD_ | gc.TINT32,
OADD_ | gc.TUINT32,
OADD_ | gc.TPTR32:
a = x86.AADDL
case gc.OADD<<16 | gc.TINT64,
gc.OADD<<16 | gc.TUINT64,
gc.OADD<<16 | gc.TPTR64:
case OADD_ | gc.TINT64,
OADD_ | gc.TUINT64,
OADD_ | gc.TPTR64:
a = x86.AADDQ
case gc.OADD<<16 | gc.TFLOAT32:
case OADD_ | gc.TFLOAT32:
a = x86.AADDSS
case gc.OADD<<16 | gc.TFLOAT64:
case OADD_ | gc.TFLOAT64:
a = x86.AADDSD
case gc.OSUB<<16 | gc.TINT8,
gc.OSUB<<16 | gc.TUINT8:
case OSUB_ | gc.TINT8,
OSUB_ | gc.TUINT8:
a = x86.ASUBB
case gc.OSUB<<16 | gc.TINT16,
gc.OSUB<<16 | gc.TUINT16:
case OSUB_ | gc.TINT16,
OSUB_ | gc.TUINT16:
a = x86.ASUBW
case gc.OSUB<<16 | gc.TINT32,
gc.OSUB<<16 | gc.TUINT32,
gc.OSUB<<16 | gc.TPTR32:
case OSUB_ | gc.TINT32,
OSUB_ | gc.TUINT32,
OSUB_ | gc.TPTR32:
a = x86.ASUBL
case gc.OSUB<<16 | gc.TINT64,
gc.OSUB<<16 | gc.TUINT64,
gc.OSUB<<16 | gc.TPTR64:
case OSUB_ | gc.TINT64,
OSUB_ | gc.TUINT64,
OSUB_ | gc.TPTR64:
a = x86.ASUBQ
case gc.OSUB<<16 | gc.TFLOAT32:
case OSUB_ | gc.TFLOAT32:
a = x86.ASUBSS
case gc.OSUB<<16 | gc.TFLOAT64:
case OSUB_ | gc.TFLOAT64:
a = x86.ASUBSD
case gc.OINC<<16 | gc.TINT8,
gc.OINC<<16 | gc.TUINT8:
case OINC_ | gc.TINT8,
OINC_ | gc.TUINT8:
a = x86.AINCB
case gc.OINC<<16 | gc.TINT16,
gc.OINC<<16 | gc.TUINT16:
case OINC_ | gc.TINT16,
OINC_ | gc.TUINT16:
a = x86.AINCW
case gc.OINC<<16 | gc.TINT32,
gc.OINC<<16 | gc.TUINT32,
gc.OINC<<16 | gc.TPTR32:
case OINC_ | gc.TINT32,
OINC_ | gc.TUINT32,
OINC_ | gc.TPTR32:
a = x86.AINCL
case gc.OINC<<16 | gc.TINT64,
gc.OINC<<16 | gc.TUINT64,
gc.OINC<<16 | gc.TPTR64:
case OINC_ | gc.TINT64,
OINC_ | gc.TUINT64,
OINC_ | gc.TPTR64:
a = x86.AINCQ
case gc.ODEC<<16 | gc.TINT8,
gc.ODEC<<16 | gc.TUINT8:
case ODEC_ | gc.TINT8,
ODEC_ | gc.TUINT8:
a = x86.ADECB
case gc.ODEC<<16 | gc.TINT16,
gc.ODEC<<16 | gc.TUINT16:
case ODEC_ | gc.TINT16,
ODEC_ | gc.TUINT16:
a = x86.ADECW
case gc.ODEC<<16 | gc.TINT32,
gc.ODEC<<16 | gc.TUINT32,
gc.ODEC<<16 | gc.TPTR32:
case ODEC_ | gc.TINT32,
ODEC_ | gc.TUINT32,
ODEC_ | gc.TPTR32:
a = x86.ADECL
case gc.ODEC<<16 | gc.TINT64,
gc.ODEC<<16 | gc.TUINT64,
gc.ODEC<<16 | gc.TPTR64:
case ODEC_ | gc.TINT64,
ODEC_ | gc.TUINT64,
ODEC_ | gc.TPTR64:
a = x86.ADECQ
case gc.OMINUS<<16 | gc.TINT8,
gc.OMINUS<<16 | gc.TUINT8:
case OMINUS_ | gc.TINT8,
OMINUS_ | gc.TUINT8:
a = x86.ANEGB
case gc.OMINUS<<16 | gc.TINT16,
gc.OMINUS<<16 | gc.TUINT16:
case OMINUS_ | gc.TINT16,
OMINUS_ | gc.TUINT16:
a = x86.ANEGW
case gc.OMINUS<<16 | gc.TINT32,
gc.OMINUS<<16 | gc.TUINT32,
gc.OMINUS<<16 | gc.TPTR32:
case OMINUS_ | gc.TINT32,
OMINUS_ | gc.TUINT32,
OMINUS_ | gc.TPTR32:
a = x86.ANEGL
case gc.OMINUS<<16 | gc.TINT64,
gc.OMINUS<<16 | gc.TUINT64,
gc.OMINUS<<16 | gc.TPTR64:
case OMINUS_ | gc.TINT64,
OMINUS_ | gc.TUINT64,
OMINUS_ | gc.TPTR64:
a = x86.ANEGQ
case gc.OAND<<16 | gc.TBOOL,
gc.OAND<<16 | gc.TINT8,
gc.OAND<<16 | gc.TUINT8:
case OAND_ | gc.TBOOL,
OAND_ | gc.TINT8,
OAND_ | gc.TUINT8:
a = x86.AANDB
case gc.OAND<<16 | gc.TINT16,
gc.OAND<<16 | gc.TUINT16:
case OAND_ | gc.TINT16,
OAND_ | gc.TUINT16:
a = x86.AANDW
case gc.OAND<<16 | gc.TINT32,
gc.OAND<<16 | gc.TUINT32,
gc.OAND<<16 | gc.TPTR32:
case OAND_ | gc.TINT32,
OAND_ | gc.TUINT32,
OAND_ | gc.TPTR32:
a = x86.AANDL
case gc.OAND<<16 | gc.TINT64,
gc.OAND<<16 | gc.TUINT64,
gc.OAND<<16 | gc.TPTR64:
case OAND_ | gc.TINT64,
OAND_ | gc.TUINT64,
OAND_ | gc.TPTR64:
a = x86.AANDQ
case gc.OOR<<16 | gc.TBOOL,
gc.OOR<<16 | gc.TINT8,
gc.OOR<<16 | gc.TUINT8:
case OOR_ | gc.TBOOL,
OOR_ | gc.TINT8,
OOR_ | gc.TUINT8:
a = x86.AORB
case gc.OOR<<16 | gc.TINT16,
gc.OOR<<16 | gc.TUINT16:
case OOR_ | gc.TINT16,
OOR_ | gc.TUINT16:
a = x86.AORW
case gc.OOR<<16 | gc.TINT32,
gc.OOR<<16 | gc.TUINT32,
gc.OOR<<16 | gc.TPTR32:
case OOR_ | gc.TINT32,
OOR_ | gc.TUINT32,
OOR_ | gc.TPTR32:
a = x86.AORL
case gc.OOR<<16 | gc.TINT64,
gc.OOR<<16 | gc.TUINT64,
gc.OOR<<16 | gc.TPTR64:
case OOR_ | gc.TINT64,
OOR_ | gc.TUINT64,
OOR_ | gc.TPTR64:
a = x86.AORQ
case gc.OXOR<<16 | gc.TINT8,
gc.OXOR<<16 | gc.TUINT8:
case OXOR_ | gc.TINT8,
OXOR_ | gc.TUINT8:
a = x86.AXORB
case gc.OXOR<<16 | gc.TINT16,
gc.OXOR<<16 | gc.TUINT16:
case OXOR_ | gc.TINT16,
OXOR_ | gc.TUINT16:
a = x86.AXORW
case gc.OXOR<<16 | gc.TINT32,
gc.OXOR<<16 | gc.TUINT32,
gc.OXOR<<16 | gc.TPTR32:
case OXOR_ | gc.TINT32,
OXOR_ | gc.TUINT32,
OXOR_ | gc.TPTR32:
a = x86.AXORL
case gc.OXOR<<16 | gc.TINT64,
gc.OXOR<<16 | gc.TUINT64,
gc.OXOR<<16 | gc.TPTR64:
case OXOR_ | gc.TINT64,
OXOR_ | gc.TUINT64,
OXOR_ | gc.TPTR64:
a = x86.AXORQ
case gc.OLROT<<16 | gc.TINT8,
gc.OLROT<<16 | gc.TUINT8:
case OLROT_ | gc.TINT8,
OLROT_ | gc.TUINT8:
a = x86.AROLB
case gc.OLROT<<16 | gc.TINT16,
gc.OLROT<<16 | gc.TUINT16:
case OLROT_ | gc.TINT16,
OLROT_ | gc.TUINT16:
a = x86.AROLW
case gc.OLROT<<16 | gc.TINT32,
gc.OLROT<<16 | gc.TUINT32,
gc.OLROT<<16 | gc.TPTR32:
case OLROT_ | gc.TINT32,
OLROT_ | gc.TUINT32,
OLROT_ | gc.TPTR32:
a = x86.AROLL
case gc.OLROT<<16 | gc.TINT64,
gc.OLROT<<16 | gc.TUINT64,
gc.OLROT<<16 | gc.TPTR64:
case OLROT_ | gc.TINT64,
OLROT_ | gc.TUINT64,
OLROT_ | gc.TPTR64:
a = x86.AROLQ
case gc.OLSH<<16 | gc.TINT8,
gc.OLSH<<16 | gc.TUINT8:
case OLSH_ | gc.TINT8,
OLSH_ | gc.TUINT8:
a = x86.ASHLB
case gc.OLSH<<16 | gc.TINT16,
gc.OLSH<<16 | gc.TUINT16:
case OLSH_ | gc.TINT16,
OLSH_ | gc.TUINT16:
a = x86.ASHLW
case gc.OLSH<<16 | gc.TINT32,
gc.OLSH<<16 | gc.TUINT32,
gc.OLSH<<16 | gc.TPTR32:
case OLSH_ | gc.TINT32,
OLSH_ | gc.TUINT32,
OLSH_ | gc.TPTR32:
a = x86.ASHLL
case gc.OLSH<<16 | gc.TINT64,
gc.OLSH<<16 | gc.TUINT64,
gc.OLSH<<16 | gc.TPTR64:
case OLSH_ | gc.TINT64,
OLSH_ | gc.TUINT64,
OLSH_ | gc.TPTR64:
a = x86.ASHLQ
case gc.ORSH<<16 | gc.TUINT8:
case ORSH_ | gc.TUINT8:
a = x86.ASHRB
case gc.ORSH<<16 | gc.TUINT16:
case ORSH_ | gc.TUINT16:
a = x86.ASHRW
case gc.ORSH<<16 | gc.TUINT32,
gc.ORSH<<16 | gc.TPTR32:
case ORSH_ | gc.TUINT32,
ORSH_ | gc.TPTR32:
a = x86.ASHRL
case gc.ORSH<<16 | gc.TUINT64,
gc.ORSH<<16 | gc.TPTR64:
case ORSH_ | gc.TUINT64,
ORSH_ | gc.TPTR64:
a = x86.ASHRQ
case gc.ORSH<<16 | gc.TINT8:
case ORSH_ | gc.TINT8:
a = x86.ASARB
case gc.ORSH<<16 | gc.TINT16:
case ORSH_ | gc.TINT16:
a = x86.ASARW
case gc.ORSH<<16 | gc.TINT32:
case ORSH_ | gc.TINT32:
a = x86.ASARL
case gc.ORSH<<16 | gc.TINT64:
case ORSH_ | gc.TINT64:
a = x86.ASARQ
case gc.ORROTC<<16 | gc.TINT8,
gc.ORROTC<<16 | gc.TUINT8:
case ORROTC_ | gc.TINT8,
ORROTC_ | gc.TUINT8:
a = x86.ARCRB
case gc.ORROTC<<16 | gc.TINT16,
gc.ORROTC<<16 | gc.TUINT16:
case ORROTC_ | gc.TINT16,
ORROTC_ | gc.TUINT16:
a = x86.ARCRW
case gc.ORROTC<<16 | gc.TINT32,
gc.ORROTC<<16 | gc.TUINT32:
case ORROTC_ | gc.TINT32,
ORROTC_ | gc.TUINT32:
a = x86.ARCRL
case gc.ORROTC<<16 | gc.TINT64,
gc.ORROTC<<16 | gc.TUINT64:
case ORROTC_ | gc.TINT64,
ORROTC_ | gc.TUINT64:
a = x86.ARCRQ
case gc.OHMUL<<16 | gc.TINT8,
gc.OMUL<<16 | gc.TINT8,
gc.OMUL<<16 | gc.TUINT8:
case OHMUL_ | gc.TINT8,
OMUL_ | gc.TINT8,
OMUL_ | gc.TUINT8:
a = x86.AIMULB
case gc.OHMUL<<16 | gc.TINT16,
gc.OMUL<<16 | gc.TINT16,
gc.OMUL<<16 | gc.TUINT16:
case OHMUL_ | gc.TINT16,
OMUL_ | gc.TINT16,
OMUL_ | gc.TUINT16:
a = x86.AIMULW
case gc.OHMUL<<16 | gc.TINT32,
gc.OMUL<<16 | gc.TINT32,
gc.OMUL<<16 | gc.TUINT32,
gc.OMUL<<16 | gc.TPTR32:
case OHMUL_ | gc.TINT32,
OMUL_ | gc.TINT32,
OMUL_ | gc.TUINT32,
OMUL_ | gc.TPTR32:
a = x86.AIMULL
case gc.OHMUL<<16 | gc.TINT64,
gc.OMUL<<16 | gc.TINT64,
gc.OMUL<<16 | gc.TUINT64,
gc.OMUL<<16 | gc.TPTR64:
case OHMUL_ | gc.TINT64,
OMUL_ | gc.TINT64,
OMUL_ | gc.TUINT64,
OMUL_ | gc.TPTR64:
a = x86.AIMULQ
case gc.OHMUL<<16 | gc.TUINT8:
case OHMUL_ | gc.TUINT8:
a = x86.AMULB
case gc.OHMUL<<16 | gc.TUINT16:
case OHMUL_ | gc.TUINT16:
a = x86.AMULW
case gc.OHMUL<<16 | gc.TUINT32,
gc.OHMUL<<16 | gc.TPTR32:
case OHMUL_ | gc.TUINT32,
OHMUL_ | gc.TPTR32:
a = x86.AMULL
case gc.OHMUL<<16 | gc.TUINT64,
gc.OHMUL<<16 | gc.TPTR64:
case OHMUL_ | gc.TUINT64,
OHMUL_ | gc.TPTR64:
a = x86.AMULQ
case gc.OMUL<<16 | gc.TFLOAT32:
case OMUL_ | gc.TFLOAT32:
a = x86.AMULSS
case gc.OMUL<<16 | gc.TFLOAT64:
case OMUL_ | gc.TFLOAT64:
a = x86.AMULSD
case gc.ODIV<<16 | gc.TINT8,
gc.OMOD<<16 | gc.TINT8:
case ODIV_ | gc.TINT8,
OMOD_ | gc.TINT8:
a = x86.AIDIVB
case gc.ODIV<<16 | gc.TUINT8,
gc.OMOD<<16 | gc.TUINT8:
case ODIV_ | gc.TUINT8,
OMOD_ | gc.TUINT8:
a = x86.ADIVB
case gc.ODIV<<16 | gc.TINT16,
gc.OMOD<<16 | gc.TINT16:
case ODIV_ | gc.TINT16,
OMOD_ | gc.TINT16:
a = x86.AIDIVW
case gc.ODIV<<16 | gc.TUINT16,
gc.OMOD<<16 | gc.TUINT16:
case ODIV_ | gc.TUINT16,
OMOD_ | gc.TUINT16:
a = x86.ADIVW
case gc.ODIV<<16 | gc.TINT32,
gc.OMOD<<16 | gc.TINT32:
case ODIV_ | gc.TINT32,
OMOD_ | gc.TINT32:
a = x86.AIDIVL
case gc.ODIV<<16 | gc.TUINT32,
gc.ODIV<<16 | gc.TPTR32,
gc.OMOD<<16 | gc.TUINT32,
gc.OMOD<<16 | gc.TPTR32:
case ODIV_ | gc.TUINT32,
ODIV_ | gc.TPTR32,
OMOD_ | gc.TUINT32,
OMOD_ | gc.TPTR32:
a = x86.ADIVL
case gc.ODIV<<16 | gc.TINT64,
gc.OMOD<<16 | gc.TINT64:
case ODIV_ | gc.TINT64,
OMOD_ | gc.TINT64:
a = x86.AIDIVQ
case gc.ODIV<<16 | gc.TUINT64,
gc.ODIV<<16 | gc.TPTR64,
gc.OMOD<<16 | gc.TUINT64,
gc.OMOD<<16 | gc.TPTR64:
case ODIV_ | gc.TUINT64,
ODIV_ | gc.TPTR64,
OMOD_ | gc.TUINT64,
OMOD_ | gc.TPTR64:
a = x86.ADIVQ
case gc.OEXTEND<<16 | gc.TINT16:
case OEXTEND_ | gc.TINT16:
a = x86.ACWD
case gc.OEXTEND<<16 | gc.TINT32:
case OEXTEND_ | gc.TINT32:
a = x86.ACDQ
case gc.OEXTEND<<16 | gc.TINT64:
case OEXTEND_ | gc.TINT64:
a = x86.ACQO
case gc.ODIV<<16 | gc.TFLOAT32:
case ODIV_ | gc.TFLOAT32:
a = x86.ADIVSS
case gc.ODIV<<16 | gc.TFLOAT64:
case ODIV_ | gc.TFLOAT64:
a = x86.ADIVSD
case gc.OSQRT<<16 | gc.TFLOAT64:
case OSQRT_ | gc.TFLOAT64:
a = x86.ASQRTSD
}

View File

@ -823,6 +823,10 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 2
}
if (p.Info.Reguse|p.Info.Regset)&FtoB(int(v.Reg)) != 0 {
return 2
}
if p.Info.Flags&gc.LeftAddr != 0 {
if copyas(&p.From, v) {
return 2

View File

@ -141,7 +141,7 @@ var progtable = [x86.ALAST]obj.ProgInfo{
x86.AMOVSL: {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI},
x86.AMOVSQ: {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI},
x86.AMOVSW: {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI},
obj.ADUFFCOPY: {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI | CX},
obj.ADUFFCOPY: {Flags: gc.OK, Reguse: DI | SI, Regset: DI | SI | X0},
x86.AMOVSD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
x86.AMOVSS: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move},
@ -228,7 +228,7 @@ var progtable = [x86.ALAST]obj.ProgInfo{
x86.ASTOSL: {Flags: gc.OK, Reguse: AX | DI, Regset: DI},
x86.ASTOSQ: {Flags: gc.OK, Reguse: AX | DI, Regset: DI},
x86.ASTOSW: {Flags: gc.OK, Reguse: AX | DI, Regset: DI},
obj.ADUFFZERO: {Flags: gc.OK, Reguse: AX | DI, Regset: DI},
obj.ADUFFZERO: {Flags: gc.OK, Reguse: X0 | DI, Regset: DI},
x86.ASUBB: {Flags: gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry},
x86.ASUBL: {Flags: gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry},
x86.ASUBQ: {Flags: gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry},

View File

@ -107,6 +107,7 @@ const (
DI = 1 << (x86.REG_DI - x86.REG_AX)
SI = 1 << (x86.REG_SI - x86.REG_AX)
R15 = 1 << (x86.REG_R15 - x86.REG_AX)
X0 = 1 << 16
)
func RtoB(r int) uint64 {

View File

@ -741,9 +741,9 @@ func cgen64(n *gc.Node, res *gc.Node) {
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi1, &ah)
gins(arm.AMOVW, &lo2, &n1)
gins(optoas(int(n.Op), lo1.Type), &n1, &al)
gins(optoas(n.Op, lo1.Type), &n1, &al)
gins(arm.AMOVW, &hi2, &n1)
gins(optoas(int(n.Op), lo1.Type), &n1, &ah)
gins(optoas(n.Op, lo1.Type), &n1, &ah)
gc.Regfree(&n1)
}
@ -767,7 +767,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
* generate comparison of nl, nr, both 64-bit.
* nl is memory; nr is constant or memory.
*/
func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
func cmp64(nl *gc.Node, nr *gc.Node, op gc.Op, likely int, to *obj.Prog) {
var lo1 gc.Node
var hi1 gc.Node
var lo2 gc.Node

View File

@ -173,7 +173,7 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
* res = nl << nr
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Type.Width > 4 {
gc.Fatalf("cgen_shift %v", nl.Type)
}
@ -477,7 +477,7 @@ func ginscon(as int, c int64, n *gc.Node) {
gc.Regfree(&n2)
}
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
op = gc.Brrev(op)
n1, n2 = n2, n1

View File

@ -757,11 +757,36 @@ func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *o
/*
* return Axxx for Oxxx on type t.
*/
func optoas(op int, t *gc.Type) int {
func optoas(op gc.Op, t *gc.Type) int {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
// avoid constant conversions in switches below
const (
OMINUS_ = uint32(gc.OMINUS) << 16
OLSH_ = uint32(gc.OLSH) << 16
ORSH_ = uint32(gc.ORSH) << 16
OADD_ = uint32(gc.OADD) << 16
OSUB_ = uint32(gc.OSUB) << 16
OMUL_ = uint32(gc.OMUL) << 16
ODIV_ = uint32(gc.ODIV) << 16
OMOD_ = uint32(gc.OMOD) << 16
OOR_ = uint32(gc.OOR) << 16
OAND_ = uint32(gc.OAND) << 16
OXOR_ = uint32(gc.OXOR) << 16
OEQ_ = uint32(gc.OEQ) << 16
ONE_ = uint32(gc.ONE) << 16
OLT_ = uint32(gc.OLT) << 16
OLE_ = uint32(gc.OLE) << 16
OGE_ = uint32(gc.OGE) << 16
OGT_ = uint32(gc.OGT) << 16
OCMP_ = uint32(gc.OCMP) << 16
OPS_ = uint32(gc.OPS) << 16
OAS_ = uint32(gc.OAS) << 16
OSQRT_ = uint32(gc.OSQRT) << 16
)
a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
@ -776,261 +801,261 @@ func optoas(op int, t *gc.Type) int {
break;
*/
// TODO(kaib): make sure the conditional branches work on all edge cases
case gc.OEQ<<16 | gc.TBOOL,
gc.OEQ<<16 | gc.TINT8,
gc.OEQ<<16 | gc.TUINT8,
gc.OEQ<<16 | gc.TINT16,
gc.OEQ<<16 | gc.TUINT16,
gc.OEQ<<16 | gc.TINT32,
gc.OEQ<<16 | gc.TUINT32,
gc.OEQ<<16 | gc.TINT64,
gc.OEQ<<16 | gc.TUINT64,
gc.OEQ<<16 | gc.TPTR32,
gc.OEQ<<16 | gc.TPTR64,
gc.OEQ<<16 | gc.TFLOAT32,
gc.OEQ<<16 | gc.TFLOAT64:
case OEQ_ | gc.TBOOL,
OEQ_ | gc.TINT8,
OEQ_ | gc.TUINT8,
OEQ_ | gc.TINT16,
OEQ_ | gc.TUINT16,
OEQ_ | gc.TINT32,
OEQ_ | gc.TUINT32,
OEQ_ | gc.TINT64,
OEQ_ | gc.TUINT64,
OEQ_ | gc.TPTR32,
OEQ_ | gc.TPTR64,
OEQ_ | gc.TFLOAT32,
OEQ_ | gc.TFLOAT64:
a = arm.ABEQ
case gc.ONE<<16 | gc.TBOOL,
gc.ONE<<16 | gc.TINT8,
gc.ONE<<16 | gc.TUINT8,
gc.ONE<<16 | gc.TINT16,
gc.ONE<<16 | gc.TUINT16,
gc.ONE<<16 | gc.TINT32,
gc.ONE<<16 | gc.TUINT32,
gc.ONE<<16 | gc.TINT64,
gc.ONE<<16 | gc.TUINT64,
gc.ONE<<16 | gc.TPTR32,
gc.ONE<<16 | gc.TPTR64,
gc.ONE<<16 | gc.TFLOAT32,
gc.ONE<<16 | gc.TFLOAT64:
case ONE_ | gc.TBOOL,
ONE_ | gc.TINT8,
ONE_ | gc.TUINT8,
ONE_ | gc.TINT16,
ONE_ | gc.TUINT16,
ONE_ | gc.TINT32,
ONE_ | gc.TUINT32,
ONE_ | gc.TINT64,
ONE_ | gc.TUINT64,
ONE_ | gc.TPTR32,
ONE_ | gc.TPTR64,
ONE_ | gc.TFLOAT32,
ONE_ | gc.TFLOAT64:
a = arm.ABNE
case gc.OLT<<16 | gc.TINT8,
gc.OLT<<16 | gc.TINT16,
gc.OLT<<16 | gc.TINT32,
gc.OLT<<16 | gc.TINT64,
gc.OLT<<16 | gc.TFLOAT32,
gc.OLT<<16 | gc.TFLOAT64:
case OLT_ | gc.TINT8,
OLT_ | gc.TINT16,
OLT_ | gc.TINT32,
OLT_ | gc.TINT64,
OLT_ | gc.TFLOAT32,
OLT_ | gc.TFLOAT64:
a = arm.ABLT
case gc.OLT<<16 | gc.TUINT8,
gc.OLT<<16 | gc.TUINT16,
gc.OLT<<16 | gc.TUINT32,
gc.OLT<<16 | gc.TUINT64:
case OLT_ | gc.TUINT8,
OLT_ | gc.TUINT16,
OLT_ | gc.TUINT32,
OLT_ | gc.TUINT64:
a = arm.ABLO
case gc.OLE<<16 | gc.TINT8,
gc.OLE<<16 | gc.TINT16,
gc.OLE<<16 | gc.TINT32,
gc.OLE<<16 | gc.TINT64,
gc.OLE<<16 | gc.TFLOAT32,
gc.OLE<<16 | gc.TFLOAT64:
case OLE_ | gc.TINT8,
OLE_ | gc.TINT16,
OLE_ | gc.TINT32,
OLE_ | gc.TINT64,
OLE_ | gc.TFLOAT32,
OLE_ | gc.TFLOAT64:
a = arm.ABLE
case gc.OLE<<16 | gc.TUINT8,
gc.OLE<<16 | gc.TUINT16,
gc.OLE<<16 | gc.TUINT32,
gc.OLE<<16 | gc.TUINT64:
case OLE_ | gc.TUINT8,
OLE_ | gc.TUINT16,
OLE_ | gc.TUINT32,
OLE_ | gc.TUINT64:
a = arm.ABLS
case gc.OGT<<16 | gc.TINT8,
gc.OGT<<16 | gc.TINT16,
gc.OGT<<16 | gc.TINT32,
gc.OGT<<16 | gc.TINT64,
gc.OGT<<16 | gc.TFLOAT32,
gc.OGT<<16 | gc.TFLOAT64:
case OGT_ | gc.TINT8,
OGT_ | gc.TINT16,
OGT_ | gc.TINT32,
OGT_ | gc.TINT64,
OGT_ | gc.TFLOAT32,
OGT_ | gc.TFLOAT64:
a = arm.ABGT
case gc.OGT<<16 | gc.TUINT8,
gc.OGT<<16 | gc.TUINT16,
gc.OGT<<16 | gc.TUINT32,
gc.OGT<<16 | gc.TUINT64:
case OGT_ | gc.TUINT8,
OGT_ | gc.TUINT16,
OGT_ | gc.TUINT32,
OGT_ | gc.TUINT64:
a = arm.ABHI
case gc.OGE<<16 | gc.TINT8,
gc.OGE<<16 | gc.TINT16,
gc.OGE<<16 | gc.TINT32,
gc.OGE<<16 | gc.TINT64,
gc.OGE<<16 | gc.TFLOAT32,
gc.OGE<<16 | gc.TFLOAT64:
case OGE_ | gc.TINT8,
OGE_ | gc.TINT16,
OGE_ | gc.TINT32,
OGE_ | gc.TINT64,
OGE_ | gc.TFLOAT32,
OGE_ | gc.TFLOAT64:
a = arm.ABGE
case gc.OGE<<16 | gc.TUINT8,
gc.OGE<<16 | gc.TUINT16,
gc.OGE<<16 | gc.TUINT32,
gc.OGE<<16 | gc.TUINT64:
case OGE_ | gc.TUINT8,
OGE_ | gc.TUINT16,
OGE_ | gc.TUINT32,
OGE_ | gc.TUINT64:
a = arm.ABHS
case gc.OCMP<<16 | gc.TBOOL,
gc.OCMP<<16 | gc.TINT8,
gc.OCMP<<16 | gc.TUINT8,
gc.OCMP<<16 | gc.TINT16,
gc.OCMP<<16 | gc.TUINT16,
gc.OCMP<<16 | gc.TINT32,
gc.OCMP<<16 | gc.TUINT32,
gc.OCMP<<16 | gc.TPTR32:
case OCMP_ | gc.TBOOL,
OCMP_ | gc.TINT8,
OCMP_ | gc.TUINT8,
OCMP_ | gc.TINT16,
OCMP_ | gc.TUINT16,
OCMP_ | gc.TINT32,
OCMP_ | gc.TUINT32,
OCMP_ | gc.TPTR32:
a = arm.ACMP
case gc.OCMP<<16 | gc.TFLOAT32:
case OCMP_ | gc.TFLOAT32:
a = arm.ACMPF
case gc.OCMP<<16 | gc.TFLOAT64:
case OCMP_ | gc.TFLOAT64:
a = arm.ACMPD
case gc.OPS<<16 | gc.TFLOAT32,
gc.OPS<<16 | gc.TFLOAT64:
case OPS_ | gc.TFLOAT32,
OPS_ | gc.TFLOAT64:
a = arm.ABVS
case gc.OAS<<16 | gc.TBOOL:
case OAS_ | gc.TBOOL:
a = arm.AMOVB
case gc.OAS<<16 | gc.TINT8:
case OAS_ | gc.TINT8:
a = arm.AMOVBS
case gc.OAS<<16 | gc.TUINT8:
case OAS_ | gc.TUINT8:
a = arm.AMOVBU
case gc.OAS<<16 | gc.TINT16:
case OAS_ | gc.TINT16:
a = arm.AMOVHS
case gc.OAS<<16 | gc.TUINT16:
case OAS_ | gc.TUINT16:
a = arm.AMOVHU
case gc.OAS<<16 | gc.TINT32,
gc.OAS<<16 | gc.TUINT32,
gc.OAS<<16 | gc.TPTR32:
case OAS_ | gc.TINT32,
OAS_ | gc.TUINT32,
OAS_ | gc.TPTR32:
a = arm.AMOVW
case gc.OAS<<16 | gc.TFLOAT32:
case OAS_ | gc.TFLOAT32:
a = arm.AMOVF
case gc.OAS<<16 | gc.TFLOAT64:
case OAS_ | gc.TFLOAT64:
a = arm.AMOVD
case gc.OADD<<16 | gc.TINT8,
gc.OADD<<16 | gc.TUINT8,
gc.OADD<<16 | gc.TINT16,
gc.OADD<<16 | gc.TUINT16,
gc.OADD<<16 | gc.TINT32,
gc.OADD<<16 | gc.TUINT32,
gc.OADD<<16 | gc.TPTR32:
case OADD_ | gc.TINT8,
OADD_ | gc.TUINT8,
OADD_ | gc.TINT16,
OADD_ | gc.TUINT16,
OADD_ | gc.TINT32,
OADD_ | gc.TUINT32,
OADD_ | gc.TPTR32:
a = arm.AADD
case gc.OADD<<16 | gc.TFLOAT32:
case OADD_ | gc.TFLOAT32:
a = arm.AADDF
case gc.OADD<<16 | gc.TFLOAT64:
case OADD_ | gc.TFLOAT64:
a = arm.AADDD
case gc.OSUB<<16 | gc.TINT8,
gc.OSUB<<16 | gc.TUINT8,
gc.OSUB<<16 | gc.TINT16,
gc.OSUB<<16 | gc.TUINT16,
gc.OSUB<<16 | gc.TINT32,
gc.OSUB<<16 | gc.TUINT32,
gc.OSUB<<16 | gc.TPTR32:
case OSUB_ | gc.TINT8,
OSUB_ | gc.TUINT8,
OSUB_ | gc.TINT16,
OSUB_ | gc.TUINT16,
OSUB_ | gc.TINT32,
OSUB_ | gc.TUINT32,
OSUB_ | gc.TPTR32:
a = arm.ASUB
case gc.OSUB<<16 | gc.TFLOAT32:
case OSUB_ | gc.TFLOAT32:
a = arm.ASUBF
case gc.OSUB<<16 | gc.TFLOAT64:
case OSUB_ | gc.TFLOAT64:
a = arm.ASUBD
case gc.OMINUS<<16 | gc.TINT8,
gc.OMINUS<<16 | gc.TUINT8,
gc.OMINUS<<16 | gc.TINT16,
gc.OMINUS<<16 | gc.TUINT16,
gc.OMINUS<<16 | gc.TINT32,
gc.OMINUS<<16 | gc.TUINT32,
gc.OMINUS<<16 | gc.TPTR32:
case OMINUS_ | gc.TINT8,
OMINUS_ | gc.TUINT8,
OMINUS_ | gc.TINT16,
OMINUS_ | gc.TUINT16,
OMINUS_ | gc.TINT32,
OMINUS_ | gc.TUINT32,
OMINUS_ | gc.TPTR32:
a = arm.ARSB
case gc.OAND<<16 | gc.TINT8,
gc.OAND<<16 | gc.TUINT8,
gc.OAND<<16 | gc.TINT16,
gc.OAND<<16 | gc.TUINT16,
gc.OAND<<16 | gc.TINT32,
gc.OAND<<16 | gc.TUINT32,
gc.OAND<<16 | gc.TPTR32:
case OAND_ | gc.TINT8,
OAND_ | gc.TUINT8,
OAND_ | gc.TINT16,
OAND_ | gc.TUINT16,
OAND_ | gc.TINT32,
OAND_ | gc.TUINT32,
OAND_ | gc.TPTR32:
a = arm.AAND
case gc.OOR<<16 | gc.TINT8,
gc.OOR<<16 | gc.TUINT8,
gc.OOR<<16 | gc.TINT16,
gc.OOR<<16 | gc.TUINT16,
gc.OOR<<16 | gc.TINT32,
gc.OOR<<16 | gc.TUINT32,
gc.OOR<<16 | gc.TPTR32:
case OOR_ | gc.TINT8,
OOR_ | gc.TUINT8,
OOR_ | gc.TINT16,
OOR_ | gc.TUINT16,
OOR_ | gc.TINT32,
OOR_ | gc.TUINT32,
OOR_ | gc.TPTR32:
a = arm.AORR
case gc.OXOR<<16 | gc.TINT8,
gc.OXOR<<16 | gc.TUINT8,
gc.OXOR<<16 | gc.TINT16,
gc.OXOR<<16 | gc.TUINT16,
gc.OXOR<<16 | gc.TINT32,
gc.OXOR<<16 | gc.TUINT32,
gc.OXOR<<16 | gc.TPTR32:
case OXOR_ | gc.TINT8,
OXOR_ | gc.TUINT8,
OXOR_ | gc.TINT16,
OXOR_ | gc.TUINT16,
OXOR_ | gc.TINT32,
OXOR_ | gc.TUINT32,
OXOR_ | gc.TPTR32:
a = arm.AEOR
case gc.OLSH<<16 | gc.TINT8,
gc.OLSH<<16 | gc.TUINT8,
gc.OLSH<<16 | gc.TINT16,
gc.OLSH<<16 | gc.TUINT16,
gc.OLSH<<16 | gc.TINT32,
gc.OLSH<<16 | gc.TUINT32,
gc.OLSH<<16 | gc.TPTR32:
case OLSH_ | gc.TINT8,
OLSH_ | gc.TUINT8,
OLSH_ | gc.TINT16,
OLSH_ | gc.TUINT16,
OLSH_ | gc.TINT32,
OLSH_ | gc.TUINT32,
OLSH_ | gc.TPTR32:
a = arm.ASLL
case gc.ORSH<<16 | gc.TUINT8,
gc.ORSH<<16 | gc.TUINT16,
gc.ORSH<<16 | gc.TUINT32,
gc.ORSH<<16 | gc.TPTR32:
case ORSH_ | gc.TUINT8,
ORSH_ | gc.TUINT16,
ORSH_ | gc.TUINT32,
ORSH_ | gc.TPTR32:
a = arm.ASRL
case gc.ORSH<<16 | gc.TINT8,
gc.ORSH<<16 | gc.TINT16,
gc.ORSH<<16 | gc.TINT32:
case ORSH_ | gc.TINT8,
ORSH_ | gc.TINT16,
ORSH_ | gc.TINT32:
a = arm.ASRA
case gc.OMUL<<16 | gc.TUINT8,
gc.OMUL<<16 | gc.TUINT16,
gc.OMUL<<16 | gc.TUINT32,
gc.OMUL<<16 | gc.TPTR32:
case OMUL_ | gc.TUINT8,
OMUL_ | gc.TUINT16,
OMUL_ | gc.TUINT32,
OMUL_ | gc.TPTR32:
a = arm.AMULU
case gc.OMUL<<16 | gc.TINT8,
gc.OMUL<<16 | gc.TINT16,
gc.OMUL<<16 | gc.TINT32:
case OMUL_ | gc.TINT8,
OMUL_ | gc.TINT16,
OMUL_ | gc.TINT32:
a = arm.AMUL
case gc.OMUL<<16 | gc.TFLOAT32:
case OMUL_ | gc.TFLOAT32:
a = arm.AMULF
case gc.OMUL<<16 | gc.TFLOAT64:
case OMUL_ | gc.TFLOAT64:
a = arm.AMULD
case gc.ODIV<<16 | gc.TUINT8,
gc.ODIV<<16 | gc.TUINT16,
gc.ODIV<<16 | gc.TUINT32,
gc.ODIV<<16 | gc.TPTR32:
case ODIV_ | gc.TUINT8,
ODIV_ | gc.TUINT16,
ODIV_ | gc.TUINT32,
ODIV_ | gc.TPTR32:
a = arm.ADIVU
case gc.ODIV<<16 | gc.TINT8,
gc.ODIV<<16 | gc.TINT16,
gc.ODIV<<16 | gc.TINT32:
case ODIV_ | gc.TINT8,
ODIV_ | gc.TINT16,
ODIV_ | gc.TINT32:
a = arm.ADIV
case gc.OMOD<<16 | gc.TUINT8,
gc.OMOD<<16 | gc.TUINT16,
gc.OMOD<<16 | gc.TUINT32,
gc.OMOD<<16 | gc.TPTR32:
case OMOD_ | gc.TUINT8,
OMOD_ | gc.TUINT16,
OMOD_ | gc.TUINT32,
OMOD_ | gc.TPTR32:
a = arm.AMODU
case gc.OMOD<<16 | gc.TINT8,
gc.OMOD<<16 | gc.TINT16,
gc.OMOD<<16 | gc.TINT32:
case OMOD_ | gc.TINT8,
OMOD_ | gc.TINT16,
OMOD_ | gc.TINT32:
a = arm.AMOD
// case CASE(OEXTEND, TINT16):
@ -1045,13 +1070,13 @@ func optoas(op int, t *gc.Type) int {
// a = ACQO;
// break;
case gc.ODIV<<16 | gc.TFLOAT32:
case ODIV_ | gc.TFLOAT32:
a = arm.ADIVF
case gc.ODIV<<16 | gc.TFLOAT64:
case ODIV_ | gc.TFLOAT64:
a = arm.ADIVD
case gc.OSQRT<<16 | gc.TFLOAT64:
case OSQRT_ | gc.TFLOAT64:
a = arm.ASQRTD
}

View File

@ -140,7 +140,7 @@ var panicdiv *gc.Node
* res = nl % nr
* according to op.
*/
func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// Have to be careful about handling
// most negative int divided by -1 correctly.
// The hardware will generate undefined result.
@ -310,7 +310,7 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
* res = nl << nr
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
a := int(optoas(op, nl.Type))
if nr.Op == gc.OLITERAL {

View File

@ -102,7 +102,7 @@ func ginscon2(as int, n2 *gc.Node, c int64) {
gc.Regfree(&ntmp)
}
func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
// Reverse comparison to place constant last.
op = gc.Brrev(op)
@ -590,240 +590,264 @@ func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
/*
* return Axxx for Oxxx on type t.
*/
func optoas(op int, t *gc.Type) int {
func optoas(op gc.Op, t *gc.Type) int {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
// avoid constant conversions in switches below
const (
OMINUS_ = uint32(gc.OMINUS) << 16
OLSH_ = uint32(gc.OLSH) << 16
ORSH_ = uint32(gc.ORSH) << 16
OADD_ = uint32(gc.OADD) << 16
OSUB_ = uint32(gc.OSUB) << 16
OMUL_ = uint32(gc.OMUL) << 16
ODIV_ = uint32(gc.ODIV) << 16
OOR_ = uint32(gc.OOR) << 16
OAND_ = uint32(gc.OAND) << 16
OXOR_ = uint32(gc.OXOR) << 16
OEQ_ = uint32(gc.OEQ) << 16
ONE_ = uint32(gc.ONE) << 16
OLT_ = uint32(gc.OLT) << 16
OLE_ = uint32(gc.OLE) << 16
OGE_ = uint32(gc.OGE) << 16
OGT_ = uint32(gc.OGT) << 16
OCMP_ = uint32(gc.OCMP) << 16
OAS_ = uint32(gc.OAS) << 16
OHMUL_ = uint32(gc.OHMUL) << 16
OSQRT_ = uint32(gc.OSQRT) << 16
)
a := int(obj.AXXX)
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
case gc.OEQ<<16 | gc.TBOOL,
gc.OEQ<<16 | gc.TINT8,
gc.OEQ<<16 | gc.TUINT8,
gc.OEQ<<16 | gc.TINT16,
gc.OEQ<<16 | gc.TUINT16,
gc.OEQ<<16 | gc.TINT32,
gc.OEQ<<16 | gc.TUINT32,
gc.OEQ<<16 | gc.TINT64,
gc.OEQ<<16 | gc.TUINT64,
gc.OEQ<<16 | gc.TPTR32,
gc.OEQ<<16 | gc.TPTR64,
gc.OEQ<<16 | gc.TFLOAT32,
gc.OEQ<<16 | gc.TFLOAT64:
case OEQ_ | gc.TBOOL,
OEQ_ | gc.TINT8,
OEQ_ | gc.TUINT8,
OEQ_ | gc.TINT16,
OEQ_ | gc.TUINT16,
OEQ_ | gc.TINT32,
OEQ_ | gc.TUINT32,
OEQ_ | gc.TINT64,
OEQ_ | gc.TUINT64,
OEQ_ | gc.TPTR32,
OEQ_ | gc.TPTR64,
OEQ_ | gc.TFLOAT32,
OEQ_ | gc.TFLOAT64:
a = arm64.ABEQ
case gc.ONE<<16 | gc.TBOOL,
gc.ONE<<16 | gc.TINT8,
gc.ONE<<16 | gc.TUINT8,
gc.ONE<<16 | gc.TINT16,
gc.ONE<<16 | gc.TUINT16,
gc.ONE<<16 | gc.TINT32,
gc.ONE<<16 | gc.TUINT32,
gc.ONE<<16 | gc.TINT64,
gc.ONE<<16 | gc.TUINT64,
gc.ONE<<16 | gc.TPTR32,
gc.ONE<<16 | gc.TPTR64,
gc.ONE<<16 | gc.TFLOAT32,
gc.ONE<<16 | gc.TFLOAT64:
case ONE_ | gc.TBOOL,
ONE_ | gc.TINT8,
ONE_ | gc.TUINT8,
ONE_ | gc.TINT16,
ONE_ | gc.TUINT16,
ONE_ | gc.TINT32,
ONE_ | gc.TUINT32,
ONE_ | gc.TINT64,
ONE_ | gc.TUINT64,
ONE_ | gc.TPTR32,
ONE_ | gc.TPTR64,
ONE_ | gc.TFLOAT32,
ONE_ | gc.TFLOAT64:
a = arm64.ABNE
case gc.OLT<<16 | gc.TINT8,
gc.OLT<<16 | gc.TINT16,
gc.OLT<<16 | gc.TINT32,
gc.OLT<<16 | gc.TINT64:
case OLT_ | gc.TINT8,
OLT_ | gc.TINT16,
OLT_ | gc.TINT32,
OLT_ | gc.TINT64:
a = arm64.ABLT
case gc.OLT<<16 | gc.TUINT8,
gc.OLT<<16 | gc.TUINT16,
gc.OLT<<16 | gc.TUINT32,
gc.OLT<<16 | gc.TUINT64,
gc.OLT<<16 | gc.TFLOAT32,
gc.OLT<<16 | gc.TFLOAT64:
case OLT_ | gc.TUINT8,
OLT_ | gc.TUINT16,
OLT_ | gc.TUINT32,
OLT_ | gc.TUINT64,
OLT_ | gc.TFLOAT32,
OLT_ | gc.TFLOAT64:
a = arm64.ABLO
case gc.OLE<<16 | gc.TINT8,
gc.OLE<<16 | gc.TINT16,
gc.OLE<<16 | gc.TINT32,
gc.OLE<<16 | gc.TINT64:
case OLE_ | gc.TINT8,
OLE_ | gc.TINT16,
OLE_ | gc.TINT32,
OLE_ | gc.TINT64:
a = arm64.ABLE
case gc.OLE<<16 | gc.TUINT8,
gc.OLE<<16 | gc.TUINT16,
gc.OLE<<16 | gc.TUINT32,
gc.OLE<<16 | gc.TUINT64,
gc.OLE<<16 | gc.TFLOAT32,
gc.OLE<<16 | gc.TFLOAT64:
case OLE_ | gc.TUINT8,
OLE_ | gc.TUINT16,
OLE_ | gc.TUINT32,
OLE_ | gc.TUINT64,
OLE_ | gc.TFLOAT32,
OLE_ | gc.TFLOAT64:
a = arm64.ABLS
case gc.OGT<<16 | gc.TINT8,
gc.OGT<<16 | gc.TINT16,
gc.OGT<<16 | gc.TINT32,
gc.OGT<<16 | gc.TINT64,
gc.OGT<<16 | gc.TFLOAT32,
gc.OGT<<16 | gc.TFLOAT64:
case OGT_ | gc.TINT8,
OGT_ | gc.TINT16,
OGT_ | gc.TINT32,
OGT_ | gc.TINT64,
OGT_ | gc.TFLOAT32,
OGT_ | gc.TFLOAT64:
a = arm64.ABGT
case gc.OGT<<16 | gc.TUINT8,
gc.OGT<<16 | gc.TUINT16,
gc.OGT<<16 | gc.TUINT32,
gc.OGT<<16 | gc.TUINT64:
case OGT_ | gc.TUINT8,
OGT_ | gc.TUINT16,
OGT_ | gc.TUINT32,
OGT_ | gc.TUINT64:
a = arm64.ABHI
case gc.OGE<<16 | gc.TINT8,
gc.OGE<<16 | gc.TINT16,
gc.OGE<<16 | gc.TINT32,
gc.OGE<<16 | gc.TINT64,
gc.OGE<<16 | gc.TFLOAT32,
gc.OGE<<16 | gc.TFLOAT64:
case OGE_ | gc.TINT8,
OGE_ | gc.TINT16,
OGE_ | gc.TINT32,
OGE_ | gc.TINT64,
OGE_ | gc.TFLOAT32,
OGE_ | gc.TFLOAT64:
a = arm64.ABGE
case gc.OGE<<16 | gc.TUINT8,
gc.OGE<<16 | gc.TUINT16,
gc.OGE<<16 | gc.TUINT32,
gc.OGE<<16 | gc.TUINT64:
case OGE_ | gc.TUINT8,
OGE_ | gc.TUINT16,
OGE_ | gc.TUINT32,
OGE_ | gc.TUINT64:
a = arm64.ABHS
case gc.OCMP<<16 | gc.TBOOL,
gc.OCMP<<16 | gc.TINT8,
gc.OCMP<<16 | gc.TINT16,
gc.OCMP<<16 | gc.TINT32,
gc.OCMP<<16 | gc.TPTR32,
gc.OCMP<<16 | gc.TINT64,
gc.OCMP<<16 | gc.TUINT8,
gc.OCMP<<16 | gc.TUINT16,
gc.OCMP<<16 | gc.TUINT32,
gc.OCMP<<16 | gc.TUINT64,
gc.OCMP<<16 | gc.TPTR64:
case OCMP_ | gc.TBOOL,
OCMP_ | gc.TINT8,
OCMP_ | gc.TINT16,
OCMP_ | gc.TINT32,
OCMP_ | gc.TPTR32,
OCMP_ | gc.TINT64,
OCMP_ | gc.TUINT8,
OCMP_ | gc.TUINT16,
OCMP_ | gc.TUINT32,
OCMP_ | gc.TUINT64,
OCMP_ | gc.TPTR64:
a = arm64.ACMP
case gc.OCMP<<16 | gc.TFLOAT32:
case OCMP_ | gc.TFLOAT32:
a = arm64.AFCMPS
case gc.OCMP<<16 | gc.TFLOAT64:
case OCMP_ | gc.TFLOAT64:
a = arm64.AFCMPD
case gc.OAS<<16 | gc.TBOOL,
gc.OAS<<16 | gc.TINT8:
case OAS_ | gc.TBOOL,
OAS_ | gc.TINT8:
a = arm64.AMOVB
case gc.OAS<<16 | gc.TUINT8:
case OAS_ | gc.TUINT8:
a = arm64.AMOVBU
case gc.OAS<<16 | gc.TINT16:
case OAS_ | gc.TINT16:
a = arm64.AMOVH
case gc.OAS<<16 | gc.TUINT16:
case OAS_ | gc.TUINT16:
a = arm64.AMOVHU
case gc.OAS<<16 | gc.TINT32:
case OAS_ | gc.TINT32:
a = arm64.AMOVW
case gc.OAS<<16 | gc.TUINT32,
gc.OAS<<16 | gc.TPTR32:
case OAS_ | gc.TUINT32,
OAS_ | gc.TPTR32:
a = arm64.AMOVWU
case gc.OAS<<16 | gc.TINT64,
gc.OAS<<16 | gc.TUINT64,
gc.OAS<<16 | gc.TPTR64:
case OAS_ | gc.TINT64,
OAS_ | gc.TUINT64,
OAS_ | gc.TPTR64:
a = arm64.AMOVD
case gc.OAS<<16 | gc.TFLOAT32:
case OAS_ | gc.TFLOAT32:
a = arm64.AFMOVS
case gc.OAS<<16 | gc.TFLOAT64:
case OAS_ | gc.TFLOAT64:
a = arm64.AFMOVD
case gc.OADD<<16 | gc.TINT8,
gc.OADD<<16 | gc.TUINT8,
gc.OADD<<16 | gc.TINT16,
gc.OADD<<16 | gc.TUINT16,
gc.OADD<<16 | gc.TINT32,
gc.OADD<<16 | gc.TUINT32,
gc.OADD<<16 | gc.TPTR32,
gc.OADD<<16 | gc.TINT64,
gc.OADD<<16 | gc.TUINT64,
gc.OADD<<16 | gc.TPTR64:
case OADD_ | gc.TINT8,
OADD_ | gc.TUINT8,
OADD_ | gc.TINT16,
OADD_ | gc.TUINT16,
OADD_ | gc.TINT32,
OADD_ | gc.TUINT32,
OADD_ | gc.TPTR32,
OADD_ | gc.TINT64,
OADD_ | gc.TUINT64,
OADD_ | gc.TPTR64:
a = arm64.AADD
case gc.OADD<<16 | gc.TFLOAT32:
case OADD_ | gc.TFLOAT32:
a = arm64.AFADDS
case gc.OADD<<16 | gc.TFLOAT64:
case OADD_ | gc.TFLOAT64:
a = arm64.AFADDD
case gc.OSUB<<16 | gc.TINT8,
gc.OSUB<<16 | gc.TUINT8,
gc.OSUB<<16 | gc.TINT16,
gc.OSUB<<16 | gc.TUINT16,
gc.OSUB<<16 | gc.TINT32,
gc.OSUB<<16 | gc.TUINT32,
gc.OSUB<<16 | gc.TPTR32,
gc.OSUB<<16 | gc.TINT64,
gc.OSUB<<16 | gc.TUINT64,
gc.OSUB<<16 | gc.TPTR64:
case OSUB_ | gc.TINT8,
OSUB_ | gc.TUINT8,
OSUB_ | gc.TINT16,
OSUB_ | gc.TUINT16,
OSUB_ | gc.TINT32,
OSUB_ | gc.TUINT32,
OSUB_ | gc.TPTR32,
OSUB_ | gc.TINT64,
OSUB_ | gc.TUINT64,
OSUB_ | gc.TPTR64:
a = arm64.ASUB
case gc.OSUB<<16 | gc.TFLOAT32:
case OSUB_ | gc.TFLOAT32:
a = arm64.AFSUBS
case gc.OSUB<<16 | gc.TFLOAT64:
case OSUB_ | gc.TFLOAT64:
a = arm64.AFSUBD
case gc.OMINUS<<16 | gc.TINT8,
gc.OMINUS<<16 | gc.TUINT8,
gc.OMINUS<<16 | gc.TINT16,
gc.OMINUS<<16 | gc.TUINT16,
gc.OMINUS<<16 | gc.TINT32,
gc.OMINUS<<16 | gc.TUINT32,
gc.OMINUS<<16 | gc.TPTR32,
gc.OMINUS<<16 | gc.TINT64,
gc.OMINUS<<16 | gc.TUINT64,
gc.OMINUS<<16 | gc.TPTR64:
case OMINUS_ | gc.TINT8,
OMINUS_ | gc.TUINT8,
OMINUS_ | gc.TINT16,
OMINUS_ | gc.TUINT16,
OMINUS_ | gc.TINT32,
OMINUS_ | gc.TUINT32,
OMINUS_ | gc.TPTR32,
OMINUS_ | gc.TINT64,
OMINUS_ | gc.TUINT64,
OMINUS_ | gc.TPTR64:
a = arm64.ANEG
case gc.OMINUS<<16 | gc.TFLOAT32:
case OMINUS_ | gc.TFLOAT32:
a = arm64.AFNEGS
case gc.OMINUS<<16 | gc.TFLOAT64:
case OMINUS_ | gc.TFLOAT64:
a = arm64.AFNEGD
case gc.OAND<<16 | gc.TINT8,
gc.OAND<<16 | gc.TUINT8,
gc.OAND<<16 | gc.TINT16,
gc.OAND<<16 | gc.TUINT16,
gc.OAND<<16 | gc.TINT32,
gc.OAND<<16 | gc.TUINT32,
gc.OAND<<16 | gc.TPTR32,
gc.OAND<<16 | gc.TINT64,
gc.OAND<<16 | gc.TUINT64,
gc.OAND<<16 | gc.TPTR64:
case OAND_ | gc.TINT8,
OAND_ | gc.TUINT8,
OAND_ | gc.TINT16,
OAND_ | gc.TUINT16,
OAND_ | gc.TINT32,
OAND_ | gc.TUINT32,
OAND_ | gc.TPTR32,
OAND_ | gc.TINT64,
OAND_ | gc.TUINT64,
OAND_ | gc.TPTR64:
a = arm64.AAND
case gc.OOR<<16 | gc.TINT8,
gc.OOR<<16 | gc.TUINT8,
gc.OOR<<16 | gc.TINT16,
gc.OOR<<16 | gc.TUINT16,
gc.OOR<<16 | gc.TINT32,
gc.OOR<<16 | gc.TUINT32,
gc.OOR<<16 | gc.TPTR32,
gc.OOR<<16 | gc.TINT64,
gc.OOR<<16 | gc.TUINT64,
gc.OOR<<16 | gc.TPTR64:
case OOR_ | gc.TINT8,
OOR_ | gc.TUINT8,
OOR_ | gc.TINT16,
OOR_ | gc.TUINT16,
OOR_ | gc.TINT32,
OOR_ | gc.TUINT32,
OOR_ | gc.TPTR32,
OOR_ | gc.TINT64,
OOR_ | gc.TUINT64,
OOR_ | gc.TPTR64:
a = arm64.AORR
case gc.OXOR<<16 | gc.TINT8,
gc.OXOR<<16 | gc.TUINT8,
gc.OXOR<<16 | gc.TINT16,
gc.OXOR<<16 | gc.TUINT16,
gc.OXOR<<16 | gc.TINT32,
gc.OXOR<<16 | gc.TUINT32,
gc.OXOR<<16 | gc.TPTR32,
gc.OXOR<<16 | gc.TINT64,
gc.OXOR<<16 | gc.TUINT64,
gc.OXOR<<16 | gc.TPTR64:
case OXOR_ | gc.TINT8,
OXOR_ | gc.TUINT8,
OXOR_ | gc.TINT16,
OXOR_ | gc.TUINT16,
OXOR_ | gc.TINT32,
OXOR_ | gc.TUINT32,
OXOR_ | gc.TPTR32,
OXOR_ | gc.TINT64,
OXOR_ | gc.TUINT64,
OXOR_ | gc.TPTR64:
a = arm64.AEOR
// TODO(minux): handle rotates
@ -840,30 +864,30 @@ func optoas(op int, t *gc.Type) int {
// a = 0//???; RLDC?
// break;
case gc.OLSH<<16 | gc.TINT8,
gc.OLSH<<16 | gc.TUINT8,
gc.OLSH<<16 | gc.TINT16,
gc.OLSH<<16 | gc.TUINT16,
gc.OLSH<<16 | gc.TINT32,
gc.OLSH<<16 | gc.TUINT32,
gc.OLSH<<16 | gc.TPTR32,
gc.OLSH<<16 | gc.TINT64,
gc.OLSH<<16 | gc.TUINT64,
gc.OLSH<<16 | gc.TPTR64:
case OLSH_ | gc.TINT8,
OLSH_ | gc.TUINT8,
OLSH_ | gc.TINT16,
OLSH_ | gc.TUINT16,
OLSH_ | gc.TINT32,
OLSH_ | gc.TUINT32,
OLSH_ | gc.TPTR32,
OLSH_ | gc.TINT64,
OLSH_ | gc.TUINT64,
OLSH_ | gc.TPTR64:
a = arm64.ALSL
case gc.ORSH<<16 | gc.TUINT8,
gc.ORSH<<16 | gc.TUINT16,
gc.ORSH<<16 | gc.TUINT32,
gc.ORSH<<16 | gc.TPTR32,
gc.ORSH<<16 | gc.TUINT64,
gc.ORSH<<16 | gc.TPTR64:
case ORSH_ | gc.TUINT8,
ORSH_ | gc.TUINT16,
ORSH_ | gc.TUINT32,
ORSH_ | gc.TPTR32,
ORSH_ | gc.TUINT64,
ORSH_ | gc.TPTR64:
a = arm64.ALSR
case gc.ORSH<<16 | gc.TINT8,
gc.ORSH<<16 | gc.TINT16,
gc.ORSH<<16 | gc.TINT32,
gc.ORSH<<16 | gc.TINT64:
case ORSH_ | gc.TINT8,
ORSH_ | gc.TINT16,
ORSH_ | gc.TINT32,
ORSH_ | gc.TINT64:
a = arm64.AASR
// TODO(minux): handle rotates
@ -878,59 +902,59 @@ func optoas(op int, t *gc.Type) int {
// a = 0//??? RLDC??
// break;
case gc.OHMUL<<16 | gc.TINT64:
case OHMUL_ | gc.TINT64:
a = arm64.ASMULH
case gc.OHMUL<<16 | gc.TUINT64,
gc.OHMUL<<16 | gc.TPTR64:
case OHMUL_ | gc.TUINT64,
OHMUL_ | gc.TPTR64:
a = arm64.AUMULH
case gc.OMUL<<16 | gc.TINT8,
gc.OMUL<<16 | gc.TINT16,
gc.OMUL<<16 | gc.TINT32:
case OMUL_ | gc.TINT8,
OMUL_ | gc.TINT16,
OMUL_ | gc.TINT32:
a = arm64.ASMULL
case gc.OMUL<<16 | gc.TINT64:
case OMUL_ | gc.TINT64:
a = arm64.AMUL
case gc.OMUL<<16 | gc.TUINT8,
gc.OMUL<<16 | gc.TUINT16,
gc.OMUL<<16 | gc.TUINT32,
gc.OMUL<<16 | gc.TPTR32:
case OMUL_ | gc.TUINT8,
OMUL_ | gc.TUINT16,
OMUL_ | gc.TUINT32,
OMUL_ | gc.TPTR32:
// don't use word multiply, the high 32 bits are undefined.
a = arm64.AUMULL
case gc.OMUL<<16 | gc.TUINT64,
gc.OMUL<<16 | gc.TPTR64:
case OMUL_ | gc.TUINT64,
OMUL_ | gc.TPTR64:
a = arm64.AMUL // for 64-bit multiplies, signedness doesn't matter.
case gc.OMUL<<16 | gc.TFLOAT32:
case OMUL_ | gc.TFLOAT32:
a = arm64.AFMULS
case gc.OMUL<<16 | gc.TFLOAT64:
case OMUL_ | gc.TFLOAT64:
a = arm64.AFMULD
case gc.ODIV<<16 | gc.TINT8,
gc.ODIV<<16 | gc.TINT16,
gc.ODIV<<16 | gc.TINT32,
gc.ODIV<<16 | gc.TINT64:
case ODIV_ | gc.TINT8,
ODIV_ | gc.TINT16,
ODIV_ | gc.TINT32,
ODIV_ | gc.TINT64:
a = arm64.ASDIV
case gc.ODIV<<16 | gc.TUINT8,
gc.ODIV<<16 | gc.TUINT16,
gc.ODIV<<16 | gc.TUINT32,
gc.ODIV<<16 | gc.TPTR32,
gc.ODIV<<16 | gc.TUINT64,
gc.ODIV<<16 | gc.TPTR64:
case ODIV_ | gc.TUINT8,
ODIV_ | gc.TUINT16,
ODIV_ | gc.TUINT32,
ODIV_ | gc.TPTR32,
ODIV_ | gc.TUINT64,
ODIV_ | gc.TPTR64:
a = arm64.AUDIV
case gc.ODIV<<16 | gc.TFLOAT32:
case ODIV_ | gc.TFLOAT32:
a = arm64.AFDIVS
case gc.ODIV<<16 | gc.TFLOAT64:
case ODIV_ | gc.TFLOAT64:
a = arm64.AFDIVD
case gc.OSQRT<<16 | gc.TFLOAT64:
case OSQRT_ | gc.TFLOAT64:
a = arm64.AFSQRTD
}

View File

@ -6,12 +6,8 @@ package gc
import "cmd/internal/obj"
/*
* machine size and rounding
* alignment is dictated around
* the size of a pointer, set in betypeinit
* (see ../6g/galign.c).
*/
// machine size and rounding alignment is dictated around
// the size of a pointer, set in betypeinit (see ../amd64/galign.go).
var defercalc int
func Rnd(o int64, r int64) int64 {
@ -68,7 +64,7 @@ func widstruct(errtype *Type, t *Type, o int64, flag int) int64 {
f.Width = o // really offset for TFIELD
if f.Nname != nil {
// this same stackparam logic is in addrescapes
// in typecheck.c. usually addrescapes runs after
// in typecheck.go. usually addrescapes runs after
// widstruct, in which case we could drop this,
// but function closure functions are the exception.
if f.Nname.Name.Param.Stackparam != nil {
@ -153,15 +149,15 @@ func dowidth(t *Type) {
t.Width = -2
t.Align = 0
et := int32(t.Etype)
et := t.Etype
switch et {
case TFUNC, TCHAN, TMAP, TSTRING:
break
/* simtype == 0 during bootstrap */
// simtype == 0 during bootstrap
default:
if Simtype[t.Etype] != 0 {
et = int32(Simtype[t.Etype])
et = Simtype[t.Etype]
}
}
@ -170,7 +166,7 @@ func dowidth(t *Type) {
default:
Fatalf("dowidth: unknown type: %v", t)
/* compiler-specific stuff */
// compiler-specific stuff
case TINT8, TUINT8, TBOOL:
// bool is int8
w = 1
@ -238,7 +234,7 @@ func dowidth(t *Type) {
}
w = 1 // anything will do
// dummy type; should be replaced before use.
// dummy type; should be replaced before use.
case TANY:
if Debug['A'] == 0 {
Fatalf("dowidth any")
@ -286,7 +282,7 @@ func dowidth(t *Type) {
}
w = widstruct(t, t, 0, 1)
// make fake type to check later to
// make fake type to check later to
// trigger function argument computation.
case TFUNC:
t1 := typ(TFUNCARGS)
@ -297,7 +293,7 @@ func dowidth(t *Type) {
// width of func type is pointer
w = int64(Widthptr)
// function is 3 cated structures;
// function is 3 cated structures;
// compute their widths as side-effect.
case TFUNCARGS:
t1 := t.Type
@ -333,23 +329,21 @@ func dowidth(t *Type) {
}
}
/*
* when a type's width should be known, we call checkwidth
* to compute it. during a declaration like
*
* type T *struct { next T }
*
* it is necessary to defer the calculation of the struct width
* until after T has been initialized to be a pointer to that struct.
* similarly, during import processing structs may be used
* before their definition. in those situations, calling
* defercheckwidth() stops width calculations until
* resumecheckwidth() is called, at which point all the
* checkwidths that were deferred are executed.
* dowidth should only be called when the type's size
* is needed immediately. checkwidth makes sure the
* size is evaluated eventually.
*/
// when a type's width should be known, we call checkwidth
// to compute it. during a declaration like
//
// type T *struct { next T }
//
// it is necessary to defer the calculation of the struct width
// until after T has been initialized to be a pointer to that struct.
// similarly, during import processing structs may be used
// before their definition. in those situations, calling
// defercheckwidth() stops width calculations until
// resumecheckwidth() is called, at which point all the
// checkwidths that were deferred are executed.
// dowidth should only be called when the type's size
// is needed immediately. checkwidth makes sure the
// size is evaluated eventually.
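// A sketch of the usual bracketing (Import in bimport.go follows this
// pattern while reading types):
//
//	defercheckwidth()
//	// ... declare or import types that may refer to each other ...
//	resumecheckwidth()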
type TypeList struct {
t *Type
next *TypeList
@ -422,8 +416,8 @@ func typeinit() {
Fatalf("typeinit before betypeinit")
}
for i := 0; i < NTYPE; i++ {
Simtype[i] = uint8(i)
for et := EType(0); et < NTYPE; et++ {
Simtype[et] = et
}
Types[TPTR32] = typ(TPTR32)
@ -445,8 +439,8 @@ func typeinit() {
Tptr = TPTR64
}
for i := TINT8; i <= TUINT64; i++ {
Isint[i] = true
for et := TINT8; et <= TUINT64; et++ {
Isint[et] = true
}
Isint[TINT] = true
Isint[TUINT] = true
@ -469,39 +463,37 @@ func typeinit() {
Issigned[TINT32] = true
Issigned[TINT64] = true
/*
* initialize okfor
*/
for i := 0; i < NTYPE; i++ {
if Isint[i] || i == TIDEAL {
okforeq[i] = true
okforcmp[i] = true
okforarith[i] = true
okforadd[i] = true
okforand[i] = true
okforconst[i] = true
issimple[i] = true
Minintval[i] = new(Mpint)
Maxintval[i] = new(Mpint)
// initialize okfor
for et := EType(0); et < NTYPE; et++ {
if Isint[et] || et == TIDEAL {
okforeq[et] = true
okforcmp[et] = true
okforarith[et] = true
okforadd[et] = true
okforand[et] = true
okforconst[et] = true
issimple[et] = true
Minintval[et] = new(Mpint)
Maxintval[et] = new(Mpint)
}
if Isfloat[i] {
okforeq[i] = true
okforcmp[i] = true
okforadd[i] = true
okforarith[i] = true
okforconst[i] = true
issimple[i] = true
minfltval[i] = newMpflt()
maxfltval[i] = newMpflt()
if Isfloat[et] {
okforeq[et] = true
okforcmp[et] = true
okforadd[et] = true
okforarith[et] = true
okforconst[et] = true
issimple[et] = true
minfltval[et] = newMpflt()
maxfltval[et] = newMpflt()
}
if Iscomplex[i] {
okforeq[i] = true
okforadd[i] = true
okforarith[i] = true
okforconst[i] = true
issimple[i] = true
if Iscomplex[et] {
okforeq[et] = true
okforadd[et] = true
okforarith[et] = true
okforconst[et] = true
issimple[et] = true
}
}
@ -599,10 +591,10 @@ func typeinit() {
mpatofix(Maxintval[TUINT32], "0xffffffff")
mpatofix(Maxintval[TUINT64], "0xffffffffffffffff")
/* f is valid float if min < f < max. (min and max are not themselves valid.) */
mpatoflt(maxfltval[TFLOAT32], "33554431p103") /* 2^24-1 p (127-23) + 1/2 ulp*/
// f is valid float if min < f < max. (min and max are not themselves valid.)
mpatoflt(maxfltval[TFLOAT32], "33554431p103") // 2^24-1 p (127-23) + 1/2 ulp
mpatoflt(minfltval[TFLOAT32], "-33554431p103")
mpatoflt(maxfltval[TFLOAT64], "18014398509481983p970") /* 2^53-1 p (1023-52) + 1/2 ulp */
mpatoflt(maxfltval[TFLOAT64], "18014398509481983p970") // 2^53-1 p (1023-52) + 1/2 ulp
mpatoflt(minfltval[TFLOAT64], "-18014398509481983p970")
maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32]
@ -610,40 +602,36 @@ func typeinit() {
maxfltval[TCOMPLEX128] = maxfltval[TFLOAT64]
minfltval[TCOMPLEX128] = minfltval[TFLOAT64]
/* for walk to use in error messages */
// for walk to use in error messages
Types[TFUNC] = functype(nil, nil, nil)
/* types used in front end */
// types used in front end
// types[TNIL] got set early in lexinit
Types[TIDEAL] = typ(TIDEAL)
Types[TINTER] = typ(TINTER)
/* simple aliases */
Simtype[TMAP] = uint8(Tptr)
// simple aliases
Simtype[TMAP] = Tptr
Simtype[TCHAN] = uint8(Tptr)
Simtype[TFUNC] = uint8(Tptr)
Simtype[TUNSAFEPTR] = uint8(Tptr)
Simtype[TCHAN] = Tptr
Simtype[TFUNC] = Tptr
Simtype[TUNSAFEPTR] = Tptr
/* pick up the backend thearch.typedefs */
var s1 *Sym
var etype int
var sameas int
var s *Sym
// pick up the backend thearch.typedefs
for i = range Thearch.Typedefs {
s = Lookup(Thearch.Typedefs[i].Name)
s1 = Pkglookup(Thearch.Typedefs[i].Name, builtinpkg)
s := Lookup(Thearch.Typedefs[i].Name)
s1 := Pkglookup(Thearch.Typedefs[i].Name, builtinpkg)
etype = Thearch.Typedefs[i].Etype
if etype < 0 || etype >= len(Types) {
etype := Thearch.Typedefs[i].Etype
if int(etype) >= len(Types) {
Fatalf("typeinit: %s bad etype", s.Name)
}
sameas = Thearch.Typedefs[i].Sameas
if sameas < 0 || sameas >= len(Types) {
sameas := Thearch.Typedefs[i].Sameas
if int(sameas) >= len(Types) {
Fatalf("typeinit: %s bad sameas", s.Name)
}
Simtype[etype] = uint8(sameas)
Simtype[etype] = sameas
minfltval[etype] = minfltval[sameas]
maxfltval[etype] = maxfltval[sameas]
Minintval[etype] = Minintval[sameas]
@ -678,9 +666,7 @@ func typeinit() {
itable.Type = Types[TUINT8]
}
/*
* compute total size of f's in/out arguments.
*/
// compute total size of f's in/out arguments.
func Argsize(t *Type) int {
var save Iter
var x int64

File diff suppressed because it is too large


View File

@ -0,0 +1,634 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Binary package import.
// Based loosely on x/tools/go/importer.
package gc
import (
"cmd/compile/internal/big"
"cmd/internal/obj"
"encoding/binary"
)
// The overall structure of Import is symmetric to Export: For each
// export method in bexport.go there is a matching and symmetric method
// in bimport.go. Changing the export format requires making symmetric
// changes to bimport.go and bexport.go.
// Import populates importpkg from the serialized package data.
func Import(in *obj.Biobuf) {
p := importer{in: in}
p.buf = p.bufarray[:]
// read low-level encoding format
switch format := p.byte(); format {
case 'c':
// compact format - nothing to do
case 'd':
p.debugFormat = true
default:
Fatalf("invalid encoding format in export data: got %q; want 'c' or 'd'", format)
}
// --- generic export data ---
if v := p.string(); v != exportVersion {
Fatalf("unknown export data version: %s", v)
}
// populate typList with predeclared "known" types
p.typList = append(p.typList, predeclared()...)
// read package data
p.pkg()
if p.pkgList[0] != importpkg {
Fatalf("imported package not found in pkgList[0]")
}
// read compiler-specific flags
importpkg.Safe = p.string() == "safe"
// defer some type-checking until all types are read in completely
// (go.y:import_there)
tcok := typecheckok
typecheckok = true
defercheckwidth()
// read consts
for i := p.int(); i > 0; i-- {
sym := p.localname()
typ := p.typ()
val := p.value(typ)
if isideal(typ) {
// canonicalize ideal types
typ = Types[TIDEAL]
}
importconst(sym, typ, nodlit(val))
}
// read vars
for i := p.int(); i > 0; i-- {
sym := p.localname()
typ := p.typ()
importvar(sym, typ)
}
// read funcs
for i := p.int(); i > 0; i-- {
// go.y:hidden_fndcl
sym := p.localname()
typ := p.typ()
// TODO(gri) fix this
p.int() // read and discard index of inlined function body for now
importsym(sym, ONAME)
if sym.Def != nil && sym.Def.Op == ONAME && !Eqtype(typ, sym.Def.Type) {
Fatalf("inconsistent definition for func %v during import\n\t%v\n\t%v", sym, sym.Def.Type, typ)
}
n := newfuncname(sym)
n.Type = typ
declare(n, PFUNC)
funchdr(n)
// go.y:hidden_import
n.Func.Inl = nil
funcbody(n)
importlist = append(importlist, n) // TODO(gri) do this only if body is inlineable?
}
// read types
for i := p.int(); i > 0; i-- {
// name is parsed as part of named type
p.typ()
}
// --- compiler-specific export data ---
for i := p.int(); i > 0; i-- {
p.body()
}
// --- end of export data ---
typecheckok = tcok
resumecheckwidth()
testdclstack() // debugging only
}
type importer struct {
in *obj.Biobuf
buf []byte // for reading strings
bufarray [64]byte // initial underlying array for buf, large enough to avoid allocation when compiling std lib
pkgList []*Pkg
typList []*Type
debugFormat bool
read int // bytes read
}
func (p *importer) pkg() *Pkg {
// if the package was seen before, i is its index (>= 0)
i := p.tagOrIndex()
if i >= 0 {
return p.pkgList[i]
}
// otherwise, i is the package tag (< 0)
if i != packageTag {
Fatalf("expected package tag, found tag = %d", i)
}
// read package data
name := p.string()
path := p.string()
// we should never see an empty package name
if name == "" {
Fatalf("empty package name in import")
}
// we should never see a bad import path
if isbadimport(path) {
Fatalf("bad path in import: %q", path)
}
// an empty path denotes the package we are currently importing
pkg := importpkg
if path != "" {
pkg = mkpkg(path)
}
if pkg.Name == "" {
pkg.Name = name
} else if pkg.Name != name {
Fatalf("inconsistent package names: got %s; want %s (path = %s)", pkg.Name, name, path)
}
p.pkgList = append(p.pkgList, pkg)
return pkg
}
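The first varint here does double duty, a convention used throughout this importer: a non-negative value is an index into a list of objects read earlier, and a negative value is a tag announcing a new object whose data follows. A self-contained sketch of that tag-or-index scheme (the tag value and the reader type below are hypothetical; the real tag constants are shared with the exporter):

	package main

	import "fmt"

	const packageTag = -1 // hypothetical tag value

	type pkg struct{ name string }

	type reader struct {
		ints    []int    // stand-in for the low-level varint stream
		names   []string // stand-in for the low-level string stream
		pkgList []*pkg
	}

	func (r *reader) int() int       { v := r.ints[0]; r.ints = r.ints[1:]; return v }
	func (r *reader) string() string { v := r.names[0]; r.names = r.names[1:]; return v }

	func (r *reader) pkg() *pkg {
		i := r.int()
		if i >= 0 {
			return r.pkgList[i] // back reference to an already-read package
		}
		if i != packageTag {
			panic("expected package tag")
		}
		p := &pkg{name: r.string()}
		r.pkgList = append(r.pkgList, p)
		return p
	}

	func main() {
		r := &reader{ints: []int{-1, 0}, names: []string{"runtime"}}
		fmt.Println(r.pkg().name) // runtime (new package, tag -1)
		fmt.Println(r.pkg().name) // runtime (back reference, index 0)
	}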
func (p *importer) localname() *Sym {
// go.y:hidden_importsym
name := p.string()
if name == "" {
Fatalf("unexpected anonymous name")
}
structpkg = importpkg // go.y:hidden_pkg_importsym
return importpkg.Lookup(name)
}
func (p *importer) newtyp(etype EType) *Type {
t := typ(etype)
p.typList = append(p.typList, t)
return t
}
func (p *importer) typ() *Type {
// if the type was seen before, i is its index (>= 0)
i := p.tagOrIndex()
if i >= 0 {
return p.typList[i]
}
// otherwise, i is the type tag (< 0)
var t *Type
switch i {
case namedTag:
// go.y:hidden_importsym
tsym := p.qualifiedName()
// go.y:hidden_pkgtype
t = pkgtype(tsym)
importsym(tsym, OTYPE)
p.typList = append(p.typList, t)
// read underlying type
// go.y:hidden_type
t0 := p.typ()
importtype(t, t0) // go.y:hidden_import
// interfaces don't have associated methods
if t0.Etype == TINTER {
break
}
// read associated methods
for i := p.int(); i > 0; i-- {
// go.y:hidden_fndcl
name := p.string()
recv := p.paramList() // TODO(gri) do we need a full param list for the receiver?
params := p.paramList()
result := p.paramList()
// TODO(gri) fix this
p.int() // read and discard index of inlined function body for now
pkg := localpkg
if !exportname(name) {
pkg = tsym.Pkg
}
sym := pkg.Lookup(name)
n := methodname1(newname(sym), recv.N.Right)
n.Type = functype(recv.N, params, result)
checkwidth(n.Type)
// addmethod uses the global variable structpkg to verify consistency
{
saved := structpkg
structpkg = tsym.Pkg
addmethod(sym, n.Type, false, nointerface)
structpkg = saved
}
nointerface = false
funchdr(n)
// (comment from go.y)
// inl.C's inlnode in on a dotmeth node expects to find the inlineable body as
// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
// out by typecheck's lookdot as this $$.ttype. So by providing
// this back link here we avoid special casing there.
n.Type.Nname = n
// go.y:hidden_import
n.Func.Inl = nil
funcbody(n)
importlist = append(importlist, n) // TODO(gri) do this only if body is inlineable?
}
case arrayTag, sliceTag:
t = p.newtyp(TARRAY)
t.Bound = -1
if i == arrayTag {
t.Bound = p.int64()
}
t.Type = p.typ()
case dddTag:
t = p.newtyp(T_old_DARRAY)
t.Bound = -1
t.Type = p.typ()
case structTag:
t = p.newtyp(TSTRUCT)
tostruct0(t, p.fieldList())
case pointerTag:
t = p.newtyp(Tptr)
t.Type = p.typ()
case signatureTag:
t = p.newtyp(TFUNC)
params := p.paramList()
result := p.paramList()
functype0(t, nil, params, result)
case interfaceTag:
t = p.newtyp(TINTER)
if p.int() != 0 {
Fatalf("unexpected embedded interface")
}
tointerface0(t, p.methodList())
case mapTag:
t = p.newtyp(TMAP)
t.Down = p.typ() // key
t.Type = p.typ() // val
case chanTag:
t = p.newtyp(TCHAN)
t.Chan = uint8(p.int())
t.Type = p.typ()
default:
Fatalf("unexpected type (tag = %d)", i)
}
if t == nil {
Fatalf("nil type (type tag = %d)", i)
}
return t
}
func (p *importer) qualifiedName() *Sym {
name := p.string()
pkg := p.pkg()
return pkg.Lookup(name)
}
// go.y:hidden_structdcl_list
func (p *importer) fieldList() *NodeList {
i := p.int()
if i == 0 {
return nil
}
n := list1(p.field())
for i--; i > 0; i-- {
n = list(n, p.field())
}
return n
}
// go.y:hidden_structdcl
func (p *importer) field() *Node {
sym := p.fieldName()
typ := p.typ()
note := p.note()
var n *Node
if sym.Name != "" {
n = Nod(ODCLFIELD, newname(sym), typenod(typ))
} else {
// anonymous field - typ must be T or *T and T must be a type name
s := typ.Sym
if s == nil && Isptr[typ.Etype] {
s = typ.Type.Sym // deref
}
pkg := importpkg
if sym != nil {
pkg = sym.Pkg
}
n = embedded(s, pkg)
n.Right = typenod(typ)
}
n.SetVal(note)
return n
}
func (p *importer) note() (v Val) {
if s := p.string(); s != "" {
v.U = s
}
return
}
// go.y:hidden_interfacedcl_list
func (p *importer) methodList() *NodeList {
i := p.int()
if i == 0 {
return nil
}
n := list1(p.method())
for i--; i > 0; i-- {
n = list(n, p.method())
}
return n
}
// go.y:hidden_interfacedcl
func (p *importer) method() *Node {
sym := p.fieldName()
params := p.paramList()
result := p.paramList()
return Nod(ODCLFIELD, newname(sym), typenod(functype(fakethis(), params, result)))
}
// go.y:sym,hidden_importsym
func (p *importer) fieldName() *Sym {
name := p.string()
pkg := localpkg
if name == "_" {
// During imports, unqualified non-exported identifiers are from builtinpkg
// (see go.y:sym). The binary exporter only exports blank as a non-exported
// identifier without qualification.
pkg = builtinpkg
} else if name == "?" || name != "" && !exportname(name) {
if name == "?" {
name = ""
}
pkg = p.pkg()
}
return pkg.Lookup(name)
}
// go.y:ohidden_funarg_list
func (p *importer) paramList() *NodeList {
i := p.int()
if i == 0 {
return nil
}
// negative length indicates unnamed parameters
named := true
if i < 0 {
i = -i
named = false
}
// i > 0
n := list1(p.param(named))
i--
for ; i > 0; i-- {
n = list(n, p.param(named))
}
return n
}
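The length prefix read here also carries the named/unnamed flag: a positive count means named parameters follow, a negative count means unnamed ones. A standalone sketch of just that sign convention (values are illustrative, not real export data):

	package main

	import "fmt"

	// decodeParamCount models the sign convention used by paramList:
	// +n means n named parameters, -n means n unnamed parameters, 0 means none.
	func decodeParamCount(i int) (n int, named bool) {
		if i == 0 {
			return 0, false
		}
		named = true
		if i < 0 {
			i, named = -i, false
		}
		return i, named
	}

	func main() {
		fmt.Println(decodeParamCount(3))  // 3 true  (three named parameters)
		fmt.Println(decodeParamCount(-2)) // 2 false (two unnamed parameters)
		fmt.Println(decodeParamCount(0))  // 0 false (no parameters)
	}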
// go.y:hidden_funarg
func (p *importer) param(named bool) *Node {
typ := p.typ()
isddd := false
if typ.Etype == T_old_DARRAY {
// T_old_DARRAY indicates ... type
typ.Etype = TARRAY
isddd = true
}
n := Nod(ODCLFIELD, nil, typenod(typ))
n.Isddd = isddd
if named {
name := p.string()
if name == "" {
Fatalf("expected named parameter")
}
// The parameter package doesn't matter; it's never consulted.
// We use the builtinpkg per go.y:sym (line 1181).
n.Left = newname(builtinpkg.Lookup(name))
}
// TODO(gri) This is compiler-specific (escape info).
// Move into compiler-specific section eventually?
n.SetVal(p.note())
return n
}
func (p *importer) value(typ *Type) (x Val) {
switch tag := p.tagOrIndex(); tag {
case falseTag:
x.U = false
case trueTag:
x.U = true
case int64Tag:
u := new(Mpint)
Mpmovecfix(u, p.int64())
u.Rune = typ == idealrune
x.U = u
case floatTag:
f := newMpflt()
p.float(f)
if typ == idealint || Isint[typ.Etype] {
// uncommon case: large int encoded as float
u := new(Mpint)
mpmovefltfix(u, f)
x.U = u
break
}
x.U = f
case complexTag:
u := new(Mpcplx)
p.float(&u.Real)
p.float(&u.Imag)
x.U = u
case stringTag:
x.U = p.string()
default:
Fatalf("unexpected value tag %d", tag)
}
// verify ideal type
if isideal(typ) && untype(x.Ctype()) != typ {
Fatalf("value %v and type %v don't match", x, typ)
}
return
}
func (p *importer) float(x *Mpflt) {
sign := p.int()
if sign == 0 {
Mpmovecflt(x, 0)
return
}
exp := p.int()
mant := new(big.Int).SetBytes([]byte(p.string()))
m := x.Val.SetInt(mant)
m.SetMantExp(m, exp-mant.BitLen())
if sign < 0 {
m.Neg(m)
}
}
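The encoding read here is a sign, a binary exponent, and the mantissa's bytes, giving the value mant * 2^(exp - mant.BitLen()). A standalone sketch using the standard math/big package (the counterpart of the cmd/compile/internal/big package imported above), reconstructing 1.5 from mantissa 3 and exponent 1; the stream values are made up for the example:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		mant := new(big.Int).SetBytes([]byte{0x03}) // mantissa bytes from the stream
		exp := 1                                    // exponent from the stream
		sign := 1                                   // sign from the stream (0 would mean zero)

		f := new(big.Float).SetInt(mant)
		f.SetMantExp(f, exp-mant.BitLen()) // 3 * 2^(1-2) = 1.5
		if sign < 0 {
			f.Neg(f)
		}
		v, _ := f.Float64()
		fmt.Println(v) // 1.5
	}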
// ----------------------------------------------------------------------------
// Inlined function bodies
func (p *importer) body() {
p.int()
p.block()
}
func (p *importer) block() {
for i := p.int(); i > 0; i-- {
p.stmt()
}
}
func (p *importer) stmt() {
// TODO(gri) do something sensible here
p.string()
}
// ----------------------------------------------------------------------------
// Low-level decoders
func (p *importer) tagOrIndex() int {
if p.debugFormat {
p.marker('t')
}
return int(p.rawInt64())
}
func (p *importer) int() int {
x := p.int64()
if int64(int(x)) != x {
Fatalf("exported integer too large")
}
return int(x)
}
func (p *importer) int64() int64 {
if p.debugFormat {
p.marker('i')
}
return p.rawInt64()
}
func (p *importer) string() string {
if p.debugFormat {
p.marker('s')
}
if n := int(p.rawInt64()); n > 0 {
if cap(p.buf) < n {
p.buf = make([]byte, n)
} else {
p.buf = p.buf[:n]
}
for i := 0; i < n; i++ {
p.buf[i] = p.byte()
}
return string(p.buf)
}
return ""
}
func (p *importer) marker(want byte) {
if got := p.byte(); got != want {
Fatalf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
}
pos := p.read
if n := int(p.rawInt64()); n != pos {
Fatalf("incorrect position: got %d; want %d", n, pos)
}
}
// rawInt64 should only be used by low-level decoders
func (p *importer) rawInt64() int64 {
i, err := binary.ReadVarint(p)
if err != nil {
Fatalf("read error: %v", err)
}
return i
}
// needed for binary.ReadVarint in rawInt64
func (p *importer) ReadByte() (byte, error) {
return p.byte(), nil
}
// byte is the bottleneck interface for reading from p.in.
// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
func (p *importer) byte() byte {
c := obj.Bgetc(p.in)
p.read++
if c < 0 {
Fatalf("read error")
}
if c == '|' {
c = obj.Bgetc(p.in)
p.read++
if c < 0 {
Fatalf("read error")
}
switch c {
case 'S':
c = '$'
case '|':
// nothing to do
default:
Fatalf("unexpected escape sequence in export data")
}
}
return byte(c)
}
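In other words, the export stream reserves '$' and writes it as "|S" (and a literal '|' as "||"); byte undoes that one escape at a time. A standalone sketch of the same unescaping rule applied to a whole buffer (illustrative only, not the importer's code):

	package main

	import "fmt"

	// unescape models the unescaping done by (*importer).byte across a buffer:
	// "|S" decodes to '$', "||" decodes to '|', other bytes pass through.
	func unescape(in []byte) []byte {
		var out []byte
		for i := 0; i < len(in); i++ {
			c := in[i]
			if c == '|' && i+1 < len(in) {
				i++
				switch in[i] {
				case 'S':
					c = '$'
				case '|':
					// c stays '|'
				default:
					panic("unexpected escape sequence in export data")
				}
			}
			out = append(out, c)
		}
		return out
	}

	func main() {
		fmt.Printf("%q\n", unescape([]byte("|S|Sfoo||bar"))) // "$$foo|bar"
	}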

View File

@ -4,7 +4,6 @@ package gc
const runtimeimport = "" +
"package runtime\n" +
"import runtime \"runtime\"\n" +
"func @\"\".newobject (@\"\".typ·2 *byte) (? *any)\n" +
"func @\"\".panicindex ()\n" +
"func @\"\".panicslice ()\n" +
@ -87,7 +86,7 @@ const runtimeimport = "" +
"func @\"\".chanrecv2 (@\"\".chanType·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any) (? bool)\n" +
"func @\"\".chansend1 (@\"\".chanType·1 *byte, @\"\".hchan·2 chan<- any, @\"\".elem·3 *any)\n" +
"func @\"\".closechan (@\"\".hchan·1 any)\n" +
"var @\"\".writeBarrierEnabled bool\n" +
"var @\"\".writeBarrier struct { @\"\".enabled bool; @\"\".needed bool; @\"\".cgo bool }\n" +
"func @\"\".writebarrierptr (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
"func @\"\".writebarrierstring (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
"func @\"\".writebarrierslice (@\"\".dst·1 *any, @\"\".src·2 any)\n" +
@ -157,12 +156,13 @@ const runtimeimport = "" +
"func @\"\".racewrite (? uintptr)\n" +
"func @\"\".racereadrange (@\"\".addr·1 uintptr, @\"\".size·2 uintptr)\n" +
"func @\"\".racewriterange (@\"\".addr·1 uintptr, @\"\".size·2 uintptr)\n" +
"func @\"\".msanread (@\"\".addr·1 uintptr, @\"\".size·2 uintptr)\n" +
"func @\"\".msanwrite (@\"\".addr·1 uintptr, @\"\".size·2 uintptr)\n" +
"\n" +
"$$\n"
const unsafeimport = "" +
"package unsafe\n" +
"import runtime \"runtime\"\n" +
"type @\"\".Pointer uintptr\n" +
"func @\"\".Offsetof (? any) (? uintptr)\n" +
"func @\"\".Sizeof (? any) (? uintptr)\n" +

View File

@ -108,7 +108,11 @@ func chanrecv2(chanType *byte, hchan <-chan any, elem *any) bool
func chansend1(chanType *byte, hchan chan<- any, elem *any)
func closechan(hchan any)
var writeBarrierEnabled bool
var writeBarrier struct {
enabled bool
needed bool
cgo bool
}
func writebarrierptr(dst *any, src any)
func writebarrierstring(dst *any, src any)
@ -195,3 +199,7 @@ func raceread(uintptr)
func racewrite(uintptr)
func racereadrange(addr, size uintptr)
func racewriterange(addr, size uintptr)
// memory sanitizer
func msanread(addr, size uintptr)
func msanwrite(addr, size uintptr)

View File

@ -56,7 +56,7 @@ func (b *bulkBvec) next() Bvec {
return out
}
/* difference */
// difference
func bvandnot(dst Bvec, src1 Bvec, src2 Bvec) {
for i, x := range src1.b {
dst.b[i] = x &^ src2.b[i]
@ -151,14 +151,14 @@ func bvnot(bv Bvec) {
}
}
/* union */
// union
func bvor(dst Bvec, src1 Bvec, src2 Bvec) {
for i, x := range src1.b {
dst.b[i] = x | src2.b[i]
}
}
/* intersection */
// intersection
func bvand(dst Bvec, src1 Bvec, src2 Bvec) {
for i, x := range src1.b {
dst.b[i] = x & src2.b[i]

View File

@ -6,15 +6,14 @@ package gc
import (
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
"fmt"
)
/*
* generate:
* res = n;
* simplifies and calls Thearch.Gmove.
* if wb is true, need to emit write barriers.
*/
// generate:
// res = n;
// simplifies and calls Thearch.Gmove.
// if wb is true, need to emit write barriers.
func Cgen(n, res *Node) {
cgen_wb(n, res, false)
}
@ -190,7 +189,7 @@ func cgen_wb(n, res *Node, wb bool) {
}
if wb {
if int(Simtype[res.Type.Etype]) != Tptr {
if Simtype[res.Type.Etype] != Tptr {
Fatalf("cgen_wb of type %v", res.Type)
}
if n.Ullman >= UINF {
@ -253,7 +252,7 @@ func cgen_wb(n, res *Node, wb bool) {
return
}
if Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
if Ctxt.Arch.Thechar == '0' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
// if both are addressable, move
if n.Addable {
if n.Op == OREGISTER || res.Op == OREGISTER {
@ -397,7 +396,7 @@ func cgen_wb(n, res *Node, wb bool) {
goto sbop
}
a := Thearch.Optoas(int(n.Op), nl.Type)
a := Thearch.Optoas(n.Op, nl.Type)
// unary
var n1 Node
Regalloc(&n1, nl.Type, res)
@ -434,15 +433,15 @@ func cgen_wb(n, res *Node, wb bool) {
OXOR,
OADD,
OMUL:
if n.Op == OMUL && Thearch.Cgen_bmul != nil && Thearch.Cgen_bmul(int(n.Op), nl, nr, res) {
if n.Op == OMUL && Thearch.Cgen_bmul != nil && Thearch.Cgen_bmul(n.Op, nl, nr, res) {
break
}
a = Thearch.Optoas(int(n.Op), nl.Type)
a = Thearch.Optoas(n.Op, nl.Type)
goto sbop
// asymmetric binary
case OSUB:
a = Thearch.Optoas(int(n.Op), nl.Type)
a = Thearch.Optoas(n.Op, nl.Type)
goto abop
case OHMUL:
@ -656,7 +655,7 @@ func cgen_wb(n, res *Node, wb bool) {
case OMOD, ODIV:
if Isfloat[n.Type.Etype] || Thearch.Dodiv == nil {
a = Thearch.Optoas(int(n.Op), nl.Type)
a = Thearch.Optoas(n.Op, nl.Type)
goto abop
}
@ -664,7 +663,7 @@ func cgen_wb(n, res *Node, wb bool) {
var n1 Node
Regalloc(&n1, nl.Type, res)
Cgen(nl, &n1)
cgen_div(int(n.Op), &n1, nr, res)
cgen_div(n.Op, &n1, nr, res)
Regfree(&n1)
} else {
var n2 Node
@ -675,34 +674,32 @@ func cgen_wb(n, res *Node, wb bool) {
n2 = *nr
}
cgen_div(int(n.Op), nl, &n2, res)
cgen_div(n.Op, nl, &n2, res)
if n2.Op != OLITERAL {
Regfree(&n2)
}
}
case OLSH, ORSH, OLROT:
Thearch.Cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
Thearch.Cgen_shift(n.Op, n.Bounded, nl, nr, res)
}
return
/*
* put simplest on right - we'll generate into left
* and then adjust it using the computation of right.
* constants and variables have the same ullman
* count, so look for constants specially.
*
* an integer constant we can use as an immediate
* is simpler than a variable - we can use the immediate
* in the adjustment instruction directly - so it goes
* on the right.
*
* other constants, like big integers or floating point
* constants, require a mov into a register, so those
* might as well go on the left, so we can reuse that
* register for the computation.
*/
// put simplest on right - we'll generate into left
// and then adjust it using the computation of right.
// constants and variables have the same ullman
// count, so look for constants specially.
//
// an integer constant we can use as an immediate
// is simpler than a variable - we can use the immediate
// in the adjustment instruction directly - so it goes
// on the right.
//
// other constants, like big integers or floating point
// constants, require a mov into a register, so those
// might as well go on the left, so we can reuse that
// register for the computation.
sbop: // symmetric binary
if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (Smallintconst(nl) || (nr.Op == OLITERAL && !Smallintconst(nr)))) {
nl, nr = nr, nl
@ -755,14 +752,14 @@ abop: // asymmetric binary
Regalloc(&n1, nl.Type, res)
Cgen(nl, &n1)
if Smallintconst(nr) && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
n2 = *nr
} else {
Regalloc(&n2, nr.Type, nil)
Cgen(nr, &n2)
}
} else {
if Smallintconst(nr) && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
n2 = *nr
} else {
Regalloc(&n2, nr.Type, res)
@ -783,8 +780,13 @@ abop: // asymmetric binary
var sys_wbptr *Node
func cgen_wbptr(n, res *Node) {
if Curfn != nil && Curfn.Func.Nowritebarrier {
Yyerror("write barrier prohibited")
if Curfn != nil {
if Curfn.Func.Nowritebarrier {
Yyerror("write barrier prohibited")
}
if Curfn.Func.WBLineno == 0 {
Curfn.Func.WBLineno = lineno
}
}
if Debug_wb > 0 {
Warn("write barrier")
@ -799,7 +801,9 @@ func cgen_wbptr(n, res *Node) {
Cgenr(n, &src, nil)
}
wbEnabled := syslook("writeBarrierEnabled", 0)
wbVar := syslook("writeBarrier", 0)
wbEnabled := Nod(ODOT, wbVar, newname(wbVar.Type.Type.Sym))
wbEnabled = typecheck(&wbEnabled, Erv)
pbr := Thearch.Ginscmp(ONE, Types[TUINT8], wbEnabled, Nodintconst(0), -1)
Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, &dst)
pjmp := Gbranch(obj.AJMP, nil, 0)
@ -826,8 +830,13 @@ func cgen_wbptr(n, res *Node) {
}
func cgen_wbfat(n, res *Node) {
if Curfn != nil && Curfn.Func.Nowritebarrier {
Yyerror("write barrier prohibited")
if Curfn != nil {
if Curfn.Func.Nowritebarrier {
Yyerror("write barrier prohibited")
}
if Curfn.Func.WBLineno == 0 {
Curfn.Func.WBLineno = lineno
}
}
if Debug_wb > 0 {
Warn("write barrier")
@ -909,11 +918,9 @@ func Mfree(n *Node) {
}
}
/*
* allocate a register (reusing res if possible) and generate
* a = n
* The caller must call Regfree(a).
*/
// allocate a register (reusing res if possible) and generate
// a = n
// The caller must call Regfree(a).
func Cgenr(n *Node, a *Node, res *Node) {
if Debug['g'] != 0 {
Dump("cgenr-n", n)
@ -949,12 +956,10 @@ func Cgenr(n *Node, a *Node, res *Node) {
}
}
/*
* allocate a register (reusing res if possible) and generate
* a = &n
* The caller must call Regfree(a).
* The generated code checks that the result is not nil.
*/
// allocate a register (reusing res if possible) and generate
// a = &n
// The caller must call Regfree(a).
// The generated code checks that the result is not nil.
func Agenr(n *Node, a *Node, res *Node) {
if Debug['g'] != 0 {
Dump("\nagenr-n", n)
@ -1468,11 +1473,9 @@ func log2(n uint64) int {
return x
}
/*
* generate:
* res = &n;
* The generated code checks that the result is not nil.
*/
// generate:
// res = &n;
// The generated code checks that the result is not nil.
func Agen(n *Node, res *Node) {
if Debug['g'] != 0 {
Dump("\nagen-res", res)
@ -1829,8 +1832,8 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
// but they don't support direct generation of a bool value yet.
// We can fix that as we go.
switch Ctxt.Arch.Thechar {
case '5', '7', '9':
Fatalf("genval 5g, 7g, 9g ONAMES not fully implemented")
case '0', '5', '7', '9':
Fatalf("genval 0g, 5g, 7g, 9g ONAMES not fully implemented")
}
Cgen(n, res)
if !wantTrue {
@ -1839,7 +1842,7 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
return
}
if n.Addable && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' {
if n.Addable && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' {
// no need for a temporary
bgenNonZero(n, nil, wantTrue, likely, to)
return
@ -1912,7 +1915,7 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
// n.Op is one of OEQ, ONE, OLT, OGT, OLE, OGE
nl := n.Left
nr := n.Right
a := int(n.Op)
op := n.Op
if !wantTrue {
if Isfloat[nr.Type.Etype] {
@ -1935,19 +1938,19 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
return
}
a = Brcom(a)
op = Brcom(op)
}
wantTrue = true
// make simplest on right
if nl.Op == OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < UINF) {
a = Brrev(a)
op = Brrev(op)
nl, nr = nr, nl
}
if Isslice(nl.Type) || Isinter(nl.Type) {
// front end should only leave cmp to literal nil
if (a != OEQ && a != ONE) || nr.Op != OLITERAL {
if (op != OEQ && op != ONE) || nr.Op != OLITERAL {
if Isslice(nl.Type) {
Yyerror("illegal slice comparison")
} else {
@ -1966,13 +1969,13 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
Regalloc(&tmp, ptr.Type, &ptr)
Cgen(&ptr, &tmp)
Regfree(&ptr)
bgenNonZero(&tmp, res, a == OEQ != wantTrue, likely, to)
bgenNonZero(&tmp, res, op == OEQ != wantTrue, likely, to)
Regfree(&tmp)
return
}
if Iscomplex[nl.Type.Etype] {
complexbool(a, nl, nr, res, wantTrue, likely, to)
complexbool(op, nl, nr, res, wantTrue, likely, to)
return
}
@ -1988,7 +1991,7 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
if !nr.Addable {
nr = CgenTemp(nr)
}
Thearch.Cmp64(nl, nr, a, likely, to)
Thearch.Cmp64(nl, nr, op, likely, to)
return
}
@ -2023,9 +2026,9 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
Cgen(nl, &n1)
nl = &n1
if Smallintconst(nr) && Ctxt.Arch.Thechar != '9' {
if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '9' {
Thearch.Gins(Thearch.Optoas(OCMP, nr.Type), nl, nr)
bins(nr.Type, res, a, likely, to)
bins(nr.Type, res, op, likely, to)
return
}
@ -2043,9 +2046,16 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
l, r := nl, nr
// On x86, only < and <= work right with NaN; reverse if needed
if Ctxt.Arch.Thechar == '6' && Isfloat[nl.Type.Etype] && (a == OGT || a == OGE) {
if Ctxt.Arch.Thechar == '6' && Isfloat[nl.Type.Etype] && (op == OGT || op == OGE) {
l, r = r, l
a = Brrev(a)
op = Brrev(op)
}
// MIPS does not have CMP instruction
if Ctxt.Arch.Thechar == '0' {
p := Thearch.Ginscmp(op, nr.Type, l, r, likely)
Patch(p, to)
return
}
// Do the comparison.
@ -2062,10 +2072,10 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
switch n.Op {
case ONE:
Patch(Gbranch(Thearch.Optoas(OPS, nr.Type), nr.Type, likely), to)
Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
default:
p := Gbranch(Thearch.Optoas(OPS, nr.Type), nr.Type, -likely)
Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
Patch(p, Pc)
}
return
@ -2111,12 +2121,12 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
// On arm64 and ppc64, <= and >= mishandle NaN. Must decompose into < or > and =.
// TODO(josh): Convert a <= b to b > a instead?
case OLE, OGE:
if a == OLE {
a = OLT
if op == OLE {
op = OLT
} else {
a = OGT
op = OGT
}
Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
Patch(Gbranch(Thearch.Optoas(OEQ, nr.Type), nr.Type, likely), to)
return
}
@ -2124,26 +2134,35 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
}
// Not a special case. Insert the conditional jump or value gen.
bins(nr.Type, res, a, likely, to)
bins(nr.Type, res, op, likely, to)
}
func bgenNonZero(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
// TODO: Optimize on systems that can compare to zero easily.
a := ONE
var op Op = ONE
if !wantTrue {
a = OEQ
op = OEQ
}
// MIPS does not have CMP instruction
if Thearch.Thechar == '0' {
p := Gbranch(Thearch.Optoas(op, n.Type), n.Type, likely)
Naddr(&p.From, n)
Patch(p, to)
return
}
var zero Node
Nodconst(&zero, n.Type, 0)
Thearch.Gins(Thearch.Optoas(OCMP, n.Type), n, &zero)
bins(n.Type, res, a, likely, to)
bins(n.Type, res, op, likely, to)
}
// bins inserts an instruction to handle the result of a compare.
// If res is non-nil, it inserts appropriate value generation instructions.
// If res is nil, it inserts a branch to to.
func bins(typ *Type, res *Node, a, likely int, to *obj.Prog) {
a = Thearch.Optoas(a, typ)
func bins(typ *Type, res *Node, op Op, likely int, to *obj.Prog) {
a := Thearch.Optoas(op, typ)
if res != nil {
// value gen
Thearch.Ginsboolval(a, res)
@ -2219,11 +2238,9 @@ func stkof(n *Node) int64 {
return -1000 // not on stack
}
/*
* block copy:
* memmove(&ns, &n, w);
* if wb is true, needs write barrier.
*/
// block copy:
// memmove(&ns, &n, w);
// if wb is true, needs write barrier.
func sgen_wb(n *Node, ns *Node, w int64, wb bool) {
if Debug['g'] != 0 {
op := "sgen"
@ -2301,15 +2318,13 @@ func sgen_wb(n *Node, ns *Node, w int64, wb bool) {
Thearch.Blockcopy(n, ns, osrc, odst, w)
}
/*
* generate:
* call f
* proc=-1 normal call but no return
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
* proc=3 normal call to C pointer (not Go func value)
*/
// generate:
// call f
// proc=-1 normal call but no return
// proc=0 normal call
// proc=1 goroutine run in new proc
// proc=2 defer call save away stack
// proc=3 normal call to C pointer (not Go func value)
func Ginscall(f *Node, proc int) {
if f.Type != nil {
extra := int32(0)
@ -2327,15 +2342,39 @@ func Ginscall(f *Node, proc int) {
-1: // normal call but no return
if f.Op == ONAME && f.Class == PFUNC {
if f == Deferreturn {
// Deferred calls will appear to be returning to
// the CALL deferreturn(SB) that we are about to emit.
// However, the stack trace code will show the line
// of the instruction byte before the return PC.
// To avoid that being an unrelated instruction,
// insert an actual hardware NOP that will have the right line number.
// This is different from obj.ANOP, which is a virtual no-op
// that doesn't make it into the instruction stream.
// Deferred calls will appear to be returning to the CALL
// deferreturn(SB) that we are about to emit. However, the
// stack scanning code will think that the instruction
// before the CALL is executing. To avoid the scanning
// code making bad assumptions (both cosmetic such as
// showing the wrong line number and fatal, such as being
// confused over whether a stack slot contains a pointer
// or a scalar) insert an actual hardware NOP that will
// have the right line number. This is different from
// obj.ANOP, which is a virtual no-op that doesn't make it
// into the instruction stream.
Thearch.Ginsnop()
if Thearch.Thechar == '9' {
// On ppc64, when compiling Go into position
// independent code on ppc64le we insert an
// instruction to reload the TOC pointer from the
// stack as well. See the long comment near
// jmpdefer in runtime/asm_ppc64.s for why.
// If the MOVD is not needed, insert a hardware NOP
// so that the same number of instructions are used
// on ppc64 in both shared and non-shared modes.
if Ctxt.Flag_shared != 0 {
p := Thearch.Gins(ppc64.AMOVD, nil, nil)
p.From.Type = obj.TYPE_MEM
p.From.Offset = 24
p.From.Reg = ppc64.REGSP
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REG_R2
} else {
Thearch.Ginsnop()
}
}
}
p := Thearch.Gins(obj.ACALL, nil, f)
@ -2395,10 +2434,8 @@ func Ginscall(f *Node, proc int) {
}
}
/*
* n is call to interface method.
* generate res = n.
*/
// n is call to interface method.
// generate res = n.
func cgen_callinter(n *Node, res *Node, proc int) {
i := n.Left
if i.Op != ODOTINTER {
@ -2468,12 +2505,10 @@ func cgen_callinter(n *Node, res *Node, proc int) {
Regfree(&nodo)
}
/*
* generate function call;
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
*/
// generate function call;
// proc=0 normal call
// proc=1 goroutine run in new proc
// proc=2 defer call save away stack
func cgen_call(n *Node, proc int) {
if n == nil {
return
@ -2519,11 +2554,9 @@ func cgen_call(n *Node, proc int) {
Ginscall(n.Left, proc)
}
/*
* call to n has already been generated.
* generate:
* res = return value from call.
*/
// call to n has already been generated.
// generate:
// res = return value from call.
func cgen_callret(n *Node, res *Node) {
t := n.Left.Type
if t.Etype == TPTR32 || t.Etype == TPTR64 {
@ -2546,11 +2579,9 @@ func cgen_callret(n *Node, res *Node) {
Cgen_as(res, &nod)
}
/*
* call to n has already been generated.
* generate:
* res = &return value from call.
*/
// call to n has already been generated.
// generate:
// res = &return value from call.
func cgen_aret(n *Node, res *Node) {
t := n.Left.Type
if Isptr[t.Etype] {
@ -2581,10 +2612,8 @@ func cgen_aret(n *Node, res *Node) {
}
}
/*
* generate return.
* n->left is assignments to return values.
*/
// generate return.
// n->left is assignments to return values.
func cgen_ret(n *Node) {
if n != nil {
Genlist(n.List) // copy out args
@ -2601,19 +2630,17 @@ func cgen_ret(n *Node) {
}
}
/*
* generate division according to op, one of:
* res = nl / nr
* res = nl % nr
*/
func cgen_div(op int, nl *Node, nr *Node, res *Node) {
// generate division according to op, one of:
// res = nl / nr
// res = nl % nr
func cgen_div(op Op, nl *Node, nr *Node, res *Node) {
var w int
// TODO(rsc): arm64 needs to support the relevant instructions
// in peep and optoas in order to enable this.
// TODO(rsc): ppc64 needs to support the relevant instructions
// in peep and optoas in order to enable this.
if nr.Op != OLITERAL || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
if nr.Op != OLITERAL || Ctxt.Arch.Thechar == '0' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
goto longdiv
}
w = int(nl.Type.Width * 8)

View File

@ -9,9 +9,7 @@ import (
"fmt"
)
/*
* function literals aka closures
*/
// function literals aka closures
func closurehdr(ntype *Node) {
var name *Node
var a *Node
@ -179,10 +177,8 @@ func closurename(n *Node) *Sym {
}
func makeclosure(func_ *Node) *Node {
/*
* wrap body in external function
* that begins by reading closure parameters.
*/
// wrap body in external function
// that begins by reading closure parameters.
xtype := Nod(OTFUNC, nil, nil)
xtype.List = func_.List

View File

@ -59,10 +59,8 @@ func (n *Node) Bool() bool {
return n.Val().U.(bool)
}
/*
* truncate float literal fv to 32-bit or 64-bit precision
* according to type; return truncated value.
*/
// truncate float literal fv to 32-bit or 64-bit precision
// according to type; return truncated value.
func truncfltlit(oldv *Mpflt, t *Type) *Mpflt {
if t == nil {
return oldv
@ -90,19 +88,15 @@ func truncfltlit(oldv *Mpflt, t *Type) *Mpflt {
return fv
}
/*
* convert n, if literal, to type t.
* implicit conversion.
*/
// convert n, if literal, to type t.
// implicit conversion.
func Convlit(np **Node, t *Type) {
convlit1(np, t, false)
}
/*
* convert n, if literal, to type t.
* return a new node if necessary
* (if n is a named constant, can't edit n->type directly).
*/
// convert n, if literal, to type t.
// return a new node if necessary
// (if n is a named constant, can't edit n->type directly).
func convlit1(np **Node, t *Type, explicit bool) {
n := *np
if n == nil || t == nil || n.Type == nil || isideal(t) || n.Type == t {
@ -255,7 +249,7 @@ func convlit1(np **Node, t *Type, explicit bool) {
if n.Type.Etype == TUNSAFEPTR && t.Etype != TUINTPTR {
goto bad
}
ct := int(n.Val().Ctype())
ct := n.Val().Ctype()
if Isint[et] {
switch ct {
default:
@ -265,7 +259,6 @@ func convlit1(np **Node, t *Type, explicit bool) {
n.SetVal(toint(n.Val()))
fallthrough
// flowthrough
case CTINT:
overflow(n.Val(), t)
}
@ -278,7 +271,6 @@ func convlit1(np **Node, t *Type, explicit bool) {
n.SetVal(toflt(n.Val()))
fallthrough
// flowthrough
case CTFLT:
n.SetVal(Val{truncfltlit(n.Val().U.(*Mpflt), t)})
}
@ -289,6 +281,7 @@ func convlit1(np **Node, t *Type, explicit bool) {
case CTFLT, CTINT, CTRUNE:
n.SetVal(tocplx(n.Val()))
fallthrough
case CTCPLX:
overflow(n.Val(), t)
@ -474,14 +467,14 @@ func tostr(v Val) Val {
return v
}
func consttype(n *Node) int {
func consttype(n *Node) Ctype {
if n == nil || n.Op != OLITERAL {
return -1
}
return int(n.Val().Ctype())
return n.Val().Ctype()
}
func Isconst(n *Node, ct int) bool {
func Isconst(n *Node, ct Ctype) bool {
t := consttype(n)
// If the caller is asking for CTINT, allow CTRUNE too.
@ -501,9 +494,7 @@ func saveorig(n *Node) *Node {
return n.Orig
}
/*
* if n is constant, rewrite as OLITERAL node.
*/
// if n is constant, rewrite as OLITERAL node.
func evconst(n *Node) {
// pick off just the opcodes that can be
// constant evaluated.
@ -596,6 +587,42 @@ func evconst(n *Node) {
wl = TIDEAL
}
// avoid constant conversions in switches below
const (
CTINT_ = uint32(CTINT)
CTRUNE_ = uint32(CTRUNE)
CTFLT_ = uint32(CTFLT)
CTCPLX_ = uint32(CTCPLX)
CTSTR_ = uint32(CTSTR)
CTBOOL_ = uint32(CTBOOL)
CTNIL_ = uint32(CTNIL)
OCONV_ = uint32(OCONV) << 16
OARRAYBYTESTR_ = uint32(OARRAYBYTESTR) << 16
OPLUS_ = uint32(OPLUS) << 16
OMINUS_ = uint32(OMINUS) << 16
OCOM_ = uint32(OCOM) << 16
ONOT_ = uint32(ONOT) << 16
OLSH_ = uint32(OLSH) << 16
ORSH_ = uint32(ORSH) << 16
OADD_ = uint32(OADD) << 16
OSUB_ = uint32(OSUB) << 16
OMUL_ = uint32(OMUL) << 16
ODIV_ = uint32(ODIV) << 16
OMOD_ = uint32(OMOD) << 16
OOR_ = uint32(OOR) << 16
OAND_ = uint32(OAND) << 16
OANDNOT_ = uint32(OANDNOT) << 16
OXOR_ = uint32(OXOR) << 16
OEQ_ = uint32(OEQ) << 16
ONE_ = uint32(ONE) << 16
OLT_ = uint32(OLT) << 16
OLE_ = uint32(OLE) << 16
OGE_ = uint32(OGE) << 16
OGT_ = uint32(OGT) << 16
OOROR_ = uint32(OOROR) << 16
OANDAND_ = uint32(OANDAND) << 16
)
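These underscore constants exist only so the switches below can avoid repeated conversions in their case labels: each key packs the operator into the high 16 bits and the constant kind into the low 16 bits, so a single switch dispatches on the (op, kind) pair. A standalone sketch of the packing with hypothetical numeric values (the real Op and Ctype values come from the compiler):

	package main

	import "fmt"

	// key packs an (operator, constant kind) pair the way evconst's case
	// labels do, e.g. OADD_ | CTINT_ == uint32(OADD)<<16 | uint32(CTINT).
	func key(op, kind uint32) uint32 { return op<<16 | kind }

	func main() {
		const (
			opAdd   = 13 // hypothetical operator value
			kindInt = 1  // hypothetical constant-kind value
		)
		k := key(opAdd, kindInt)
		fmt.Printf("0x%08x\n", k)                        // 0x000d0001
		fmt.Println(k>>16 == opAdd, k&0xffff == kindInt) // true true
	}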
nr := n.Right
var rv Val
var lno int
@ -617,11 +644,10 @@ func evconst(n *Node) {
Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), nl.Type)
n.Diag = 1
}
return
case OCONV<<16 | CTNIL,
OARRAYBYTESTR<<16 | CTNIL:
case OCONV_ | CTNIL_,
OARRAYBYTESTR_ | CTNIL_:
if n.Type.Etype == TSTRING {
v = tostr(v)
nl.Type = n.Type
@ -630,24 +656,24 @@ func evconst(n *Node) {
fallthrough
// fall through
case OCONV<<16 | CTINT,
OCONV<<16 | CTRUNE,
OCONV<<16 | CTFLT,
OCONV<<16 | CTSTR:
case OCONV_ | CTINT_,
OCONV_ | CTRUNE_,
OCONV_ | CTFLT_,
OCONV_ | CTSTR_:
convlit1(&nl, n.Type, true)
v = nl.Val()
case OPLUS<<16 | CTINT,
OPLUS<<16 | CTRUNE:
case OPLUS_ | CTINT_,
OPLUS_ | CTRUNE_:
break
case OMINUS<<16 | CTINT,
OMINUS<<16 | CTRUNE:
case OMINUS_ | CTINT_,
OMINUS_ | CTRUNE_:
mpnegfix(v.U.(*Mpint))
case OCOM<<16 | CTINT,
OCOM<<16 | CTRUNE:
case OCOM_ | CTINT_,
OCOM_ | CTRUNE_:
et := Txxx
if nl.Type != nil {
et = int(nl.Type.Etype)
@ -673,20 +699,20 @@ func evconst(n *Node) {
mpxorfixfix(v.U.(*Mpint), &b)
case OPLUS<<16 | CTFLT:
case OPLUS_ | CTFLT_:
break
case OMINUS<<16 | CTFLT:
case OMINUS_ | CTFLT_:
mpnegflt(v.U.(*Mpflt))
case OPLUS<<16 | CTCPLX:
case OPLUS_ | CTCPLX_:
break
case OMINUS<<16 | CTCPLX:
case OMINUS_ | CTCPLX_:
mpnegflt(&v.U.(*Mpcplx).Real)
mpnegflt(&v.U.(*Mpcplx).Imag)
case ONOT<<16 | CTBOOL:
case ONOT_ | CTBOOL_:
if !v.U.(bool) {
goto settrue
}
@ -797,20 +823,20 @@ func evconst(n *Node) {
default:
goto illegal
case OADD<<16 | CTINT,
OADD<<16 | CTRUNE:
case OADD_ | CTINT_,
OADD_ | CTRUNE_:
mpaddfixfix(v.U.(*Mpint), rv.U.(*Mpint), 0)
case OSUB<<16 | CTINT,
OSUB<<16 | CTRUNE:
case OSUB_ | CTINT_,
OSUB_ | CTRUNE_:
mpsubfixfix(v.U.(*Mpint), rv.U.(*Mpint))
case OMUL<<16 | CTINT,
OMUL<<16 | CTRUNE:
case OMUL_ | CTINT_,
OMUL_ | CTRUNE_:
mpmulfixfix(v.U.(*Mpint), rv.U.(*Mpint))
case ODIV<<16 | CTINT,
ODIV<<16 | CTRUNE:
case ODIV_ | CTINT_,
ODIV_ | CTRUNE_:
if mpcmpfixc(rv.U.(*Mpint), 0) == 0 {
Yyerror("division by zero")
mpsetovf(v.U.(*Mpint))
@ -819,8 +845,8 @@ func evconst(n *Node) {
mpdivfixfix(v.U.(*Mpint), rv.U.(*Mpint))
case OMOD<<16 | CTINT,
OMOD<<16 | CTRUNE:
case OMOD_ | CTINT_,
OMOD_ | CTRUNE_:
if mpcmpfixc(rv.U.(*Mpint), 0) == 0 {
Yyerror("division by zero")
mpsetovf(v.U.(*Mpint))
@ -829,40 +855,40 @@ func evconst(n *Node) {
mpmodfixfix(v.U.(*Mpint), rv.U.(*Mpint))
case OLSH<<16 | CTINT,
OLSH<<16 | CTRUNE:
case OLSH_ | CTINT_,
OLSH_ | CTRUNE_:
mplshfixfix(v.U.(*Mpint), rv.U.(*Mpint))
case ORSH<<16 | CTINT,
ORSH<<16 | CTRUNE:
case ORSH_ | CTINT_,
ORSH_ | CTRUNE_:
mprshfixfix(v.U.(*Mpint), rv.U.(*Mpint))
case OOR<<16 | CTINT,
OOR<<16 | CTRUNE:
case OOR_ | CTINT_,
OOR_ | CTRUNE_:
mporfixfix(v.U.(*Mpint), rv.U.(*Mpint))
case OAND<<16 | CTINT,
OAND<<16 | CTRUNE:
case OAND_ | CTINT_,
OAND_ | CTRUNE_:
mpandfixfix(v.U.(*Mpint), rv.U.(*Mpint))
case OANDNOT<<16 | CTINT,
OANDNOT<<16 | CTRUNE:
case OANDNOT_ | CTINT_,
OANDNOT_ | CTRUNE_:
mpandnotfixfix(v.U.(*Mpint), rv.U.(*Mpint))
case OXOR<<16 | CTINT,
OXOR<<16 | CTRUNE:
case OXOR_ | CTINT_,
OXOR_ | CTRUNE_:
mpxorfixfix(v.U.(*Mpint), rv.U.(*Mpint))
case OADD<<16 | CTFLT:
case OADD_ | CTFLT_:
mpaddfltflt(v.U.(*Mpflt), rv.U.(*Mpflt))
case OSUB<<16 | CTFLT:
case OSUB_ | CTFLT_:
mpsubfltflt(v.U.(*Mpflt), rv.U.(*Mpflt))
case OMUL<<16 | CTFLT:
case OMUL_ | CTFLT_:
mpmulfltflt(v.U.(*Mpflt), rv.U.(*Mpflt))
case ODIV<<16 | CTFLT:
case ODIV_ | CTFLT_:
if mpcmpfltc(rv.U.(*Mpflt), 0) == 0 {
Yyerror("division by zero")
Mpmovecflt(v.U.(*Mpflt), 1.0)
@ -873,7 +899,7 @@ func evconst(n *Node) {
// The default case above would print 'ideal % ideal',
// which is not quite an ideal error.
case OMOD<<16 | CTFLT:
case OMOD_ | CTFLT_:
if n.Diag == 0 {
Yyerror("illegal constant expression: floating-point %% operation")
n.Diag = 1
@ -881,18 +907,18 @@ func evconst(n *Node) {
return
case OADD<<16 | CTCPLX:
case OADD_ | CTCPLX_:
mpaddfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real)
mpaddfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag)
case OSUB<<16 | CTCPLX:
case OSUB_ | CTCPLX_:
mpsubfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real)
mpsubfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag)
case OMUL<<16 | CTCPLX:
case OMUL_ | CTCPLX_:
cmplxmpy(v.U.(*Mpcplx), rv.U.(*Mpcplx))
case ODIV<<16 | CTCPLX:
case ODIV_ | CTCPLX_:
if mpcmpfltc(&rv.U.(*Mpcplx).Real, 0) == 0 && mpcmpfltc(&rv.U.(*Mpcplx).Imag, 0) == 0 {
Yyerror("complex division by zero")
Mpmovecflt(&rv.U.(*Mpcplx).Real, 1.0)
@ -902,157 +928,157 @@ func evconst(n *Node) {
cmplxdiv(v.U.(*Mpcplx), rv.U.(*Mpcplx))
case OEQ<<16 | CTNIL:
case OEQ_ | CTNIL_:
goto settrue
case ONE<<16 | CTNIL:
case ONE_ | CTNIL_:
goto setfalse
case OEQ<<16 | CTINT,
OEQ<<16 | CTRUNE:
case OEQ_ | CTINT_,
OEQ_ | CTRUNE_:
if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) == 0 {
goto settrue
}
goto setfalse
case ONE<<16 | CTINT,
ONE<<16 | CTRUNE:
case ONE_ | CTINT_,
ONE_ | CTRUNE_:
if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) != 0 {
goto settrue
}
goto setfalse
case OLT<<16 | CTINT,
OLT<<16 | CTRUNE:
case OLT_ | CTINT_,
OLT_ | CTRUNE_:
if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) < 0 {
goto settrue
}
goto setfalse
case OLE<<16 | CTINT,
OLE<<16 | CTRUNE:
case OLE_ | CTINT_,
OLE_ | CTRUNE_:
if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) <= 0 {
goto settrue
}
goto setfalse
case OGE<<16 | CTINT,
OGE<<16 | CTRUNE:
case OGE_ | CTINT_,
OGE_ | CTRUNE_:
if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) >= 0 {
goto settrue
}
goto setfalse
case OGT<<16 | CTINT,
OGT<<16 | CTRUNE:
case OGT_ | CTINT_,
OGT_ | CTRUNE_:
if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) > 0 {
goto settrue
}
goto setfalse
case OEQ<<16 | CTFLT:
case OEQ_ | CTFLT_:
if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) == 0 {
goto settrue
}
goto setfalse
case ONE<<16 | CTFLT:
case ONE_ | CTFLT_:
if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) != 0 {
goto settrue
}
goto setfalse
case OLT<<16 | CTFLT:
case OLT_ | CTFLT_:
if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) < 0 {
goto settrue
}
goto setfalse
case OLE<<16 | CTFLT:
case OLE_ | CTFLT_:
if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) <= 0 {
goto settrue
}
goto setfalse
case OGE<<16 | CTFLT:
case OGE_ | CTFLT_:
if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) >= 0 {
goto settrue
}
goto setfalse
case OGT<<16 | CTFLT:
case OGT_ | CTFLT_:
if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) > 0 {
goto settrue
}
goto setfalse
case OEQ<<16 | CTCPLX:
case OEQ_ | CTCPLX_:
if mpcmpfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real) == 0 && mpcmpfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag) == 0 {
goto settrue
}
goto setfalse
case ONE<<16 | CTCPLX:
case ONE_ | CTCPLX_:
if mpcmpfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real) != 0 || mpcmpfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag) != 0 {
goto settrue
}
goto setfalse
case OEQ<<16 | CTSTR:
case OEQ_ | CTSTR_:
if strlit(nl) == strlit(nr) {
goto settrue
}
goto setfalse
case ONE<<16 | CTSTR:
case ONE_ | CTSTR_:
if strlit(nl) != strlit(nr) {
goto settrue
}
goto setfalse
case OLT<<16 | CTSTR:
case OLT_ | CTSTR_:
if strlit(nl) < strlit(nr) {
goto settrue
}
goto setfalse
case OLE<<16 | CTSTR:
case OLE_ | CTSTR_:
if strlit(nl) <= strlit(nr) {
goto settrue
}
goto setfalse
case OGE<<16 | CTSTR:
case OGE_ | CTSTR_:
if strlit(nl) >= strlit(nr) {
goto settrue
}
goto setfalse
case OGT<<16 | CTSTR:
case OGT_ | CTSTR_:
if strlit(nl) > strlit(nr) {
goto settrue
}
goto setfalse
case OOROR<<16 | CTBOOL:
case OOROR_ | CTBOOL_:
if v.U.(bool) || rv.U.(bool) {
goto settrue
}
goto setfalse
case OANDAND<<16 | CTBOOL:
case OANDAND_ | CTBOOL_:
if v.U.(bool) && rv.U.(bool) {
goto settrue
}
goto setfalse
case OEQ<<16 | CTBOOL:
case OEQ_ | CTBOOL_:
if v.U.(bool) == rv.U.(bool) {
goto settrue
}
goto setfalse
case ONE<<16 | CTBOOL:
case ONE_ | CTBOOL_:
if v.U.(bool) != rv.U.(bool) {
goto settrue
}
@ -1099,8 +1125,6 @@ illegal:
Yyerror("illegal constant expression: %v %v %v", nl.Type, Oconv(int(n.Op), 0), nr.Type)
n.Diag = 1
}
return
}
func nodlit(v Val) *Node {
@ -1146,7 +1170,7 @@ func nodcplxlit(r Val, i Val) *Node {
// idealkind returns a constant kind like consttype
// but for an arbitrary "ideal" (untyped constant) expression.
func idealkind(n *Node) int {
func idealkind(n *Node) Ctype {
if n == nil || !isideal(n.Type) {
return CTxxx
}
@ -1156,7 +1180,7 @@ func idealkind(n *Node) int {
return CTxxx
case OLITERAL:
return int(n.Val().Ctype())
return n.Val().Ctype()
// numeric kinds.
case OADD,
@ -1300,12 +1324,10 @@ num:
return
}
/*
* defaultlit on both nodes simultaneously;
* if they're both ideal going in they better
* get the same type going out.
* force means must assign concrete (non-ideal) type.
*/
// defaultlit on both nodes simultaneously;
// if they're both ideal going in they better
// get the same type going out.
// force means must assign concrete (non-ideal) type.
func defaultlit2(lp **Node, rp **Node, force int) {
l := *lp
r := *rp
@ -1406,11 +1428,9 @@ func nonnegconst(n *Node) int {
return -1
}
/*
* convert x to type et and back to int64
* for sign extension and truncation.
*/
func iconv(x int64, et int) int64 {
// convert x to type et and back to int64
// for sign extension and truncation.
func iconv(x int64, et EType) int64 {
switch et {
case TINT8:
x = int64(int8(x))
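iconv's round trip through the narrow type is plain Go integer conversion; a standalone illustration of the truncation and sign/zero extension involved (arbitrary values, not taken from the compiler):

	package main

	import "fmt"

	func main() {
		x := int64(511) // 0x1FF
		fmt.Println(int64(int8(x)))  // -1:  low byte 0xFF, sign-extended back to int64
		fmt.Println(int64(uint8(x))) // 255: low byte 0xFF, zero-extended back to int64
		fmt.Println(int64(int16(x))) // 511: fits in 16 bits, value unchanged
	}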

View File

@ -14,7 +14,7 @@ func overlap_cplx(f *Node, t *Node) bool {
return f.Op == OINDREG && t.Op == OINDREG && f.Xoffset+f.Type.Width >= t.Xoffset && t.Xoffset+t.Type.Width >= f.Xoffset
}
func complexbool(op int, nl, nr, res *Node, wantTrue bool, likely int, to *obj.Prog) {
func complexbool(op Op, nl, nr, res *Node, wantTrue bool, likely int, to *obj.Prog) {
// make both sides addable in ullman order
if nr != nil {
if nl.Ullman > nr.Ullman && !nl.Addable {
@ -130,7 +130,7 @@ func complexminus(nl *Node, res *Node) {
// build and execute tree
// real(res) = real(nl) op real(nr)
// imag(res) = imag(nl) op imag(nr)
func complexadd(op int, nl *Node, nr *Node, res *Node) {
func complexadd(op Op, nl *Node, nr *Node, res *Node) {
var n1 Node
var n2 Node
var n3 Node
@ -143,14 +143,14 @@ func complexadd(op int, nl *Node, nr *Node, res *Node) {
subnode(&n5, &n6, res)
var ra Node
ra.Op = uint8(op)
ra.Op = op
ra.Left = &n1
ra.Right = &n3
ra.Type = n1.Type
Cgen(&ra, &n5)
ra = Node{}
ra.Op = uint8(op)
ra.Op = op
ra.Left = &n2
ra.Right = &n4
ra.Type = n2.Type
@ -293,17 +293,10 @@ func Complexmove(f *Node, t *Node) {
ft := Simsimtype(f.Type)
tt := Simsimtype(t.Type)
switch uint32(ft)<<16 | uint32(tt) {
default:
Fatalf("complexmove: unknown conversion: %v -> %v\n", f.Type, t.Type)
// complex to complex move/convert.
// complex to complex move/convert.
// make f addable.
// also use temporary if possible stack overlap.
case TCOMPLEX64<<16 | TCOMPLEX64,
TCOMPLEX64<<16 | TCOMPLEX128,
TCOMPLEX128<<16 | TCOMPLEX64,
TCOMPLEX128<<16 | TCOMPLEX128:
if (ft == TCOMPLEX64 || ft == TCOMPLEX128) && (tt == TCOMPLEX64 || tt == TCOMPLEX128) {
if !f.Addable || overlap_cplx(f, t) {
var tmp Node
Tempname(&tmp, f.Type)
@ -320,6 +313,8 @@ func Complexmove(f *Node, t *Node) {
Cgen(&n1, &n3)
Cgen(&n2, &n4)
} else {
Fatalf("complexmove: unknown conversion: %v -> %v\n", f.Type, t.Type)
}
}
@ -471,7 +466,7 @@ func Complexgen(n *Node, res *Node) {
complexminus(nl, res)
case OADD, OSUB:
complexadd(int(n.Op), nl, nr, res)
complexadd(n.Op, nl, nr, res)
case OMUL:
complexmul(nl, nr, res)

View File

@ -23,9 +23,7 @@ func dflag() bool {
return true
}
/*
* declaration stack & operations
*/
// declaration stack & operations
func dcopy(a *Sym, b *Sym) {
a.Pkg = b.Pkg
a.Name = b.Name
@ -149,13 +147,11 @@ func redeclare(s *Sym, where string) {
var vargen int
/*
* declare individual names - var, typ, const
*/
// declare individual names - var, typ, const
var declare_typegen int
func declare(n *Node, ctxt uint8) {
func declare(n *Node, ctxt Class) {
if ctxt == PDISCARD {
return
}
@ -221,12 +217,12 @@ func declare(n *Node, ctxt uint8) {
s.Def = n
n.Name.Vargen = int32(gen)
n.Name.Funcdepth = Funcdepth
n.Class = uint8(ctxt)
n.Class = ctxt
autoexport(n, ctxt)
}
func addvar(n *Node, t *Type, ctxt uint8) {
func addvar(n *Node, t *Type, ctxt Class) {
if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil {
Fatalf("addvar: n=%v t=%v nil", n, t)
}
@ -236,10 +232,8 @@ func addvar(n *Node, t *Type, ctxt uint8) {
n.Type = t
}
/*
* declare variables from grammar
* new_name_list (type | [type] = expr_list)
*/
// declare variables from grammar
// new_name_list (type | [type] = expr_list)
func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
var init *NodeList
doexpr := el != nil
@ -302,10 +296,8 @@ func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
return init
}
/*
* declare constants from grammar
* new_name_list [[type] = expr_list]
*/
// declare constants from grammar
// new_name_list [[type] = expr_list]
func constiter(vl *NodeList, t *Node, cl *NodeList) *NodeList {
lno := int32(0) // default is to leave line number alone in listtreecopy
if cl == nil {
@ -350,10 +342,8 @@ func constiter(vl *NodeList, t *Node, cl *NodeList) *NodeList {
return vv
}
/*
* this generates a new name node,
* typically for labels or other one-off names.
*/
// this generates a new name node,
// typically for labels or other one-off names.
func newname(s *Sym) *Node {
if s == nil {
Fatalf("newname nil")
@ -377,10 +367,8 @@ func newfuncname(s *Sym) *Node {
return n
}
/*
* this generates a new name node for a name
* being declared.
*/
// this generates a new name node for a name
// being declared.
func dclname(s *Sym) *Node {
n := newname(s)
n.Op = ONONAME // caller will correct it
@ -400,12 +388,10 @@ func typenod(t *Type) *Node {
return t.Nod
}
/*
* this will return an old name
* that has already been pushed on the
* declaration list. a diagnostic is
* generated if no name has been defined.
*/
// this will return an old name
// that has already been pushed on the
// declaration list. a diagnostic is
// generated if no name has been defined.
func oldname(s *Sym) *Node {
n := s.Def
if n == nil {
@ -450,9 +436,7 @@ func oldname(s *Sym) *Node {
return n
}
/*
* := declarations
*/
// := declarations
func colasname(n *Node) bool {
switch n.Op {
case ONAME,
@ -532,10 +516,8 @@ func colas(left *NodeList, right *NodeList, lno int32) *Node {
return as
}
/*
* declare the arguments in an
* interface field declaration.
*/
// declare the arguments in an
// interface field declaration.
func ifacedcl(n *Node) {
if n.Op != ODCLFIELD || n.Right == nil {
Fatalf("ifacedcl")
@ -563,12 +545,10 @@ func ifacedcl(n *Node) {
funcbody(n)
}
/*
* declare the function proper
* and declare the arguments.
* called in extern-declaration context
* returns in auto-declaration context.
*/
// declare the function proper
// and declare the arguments.
// called in extern-declaration context
// returns in auto-declaration context.
func funchdr(n *Node) {
// change the declaration context from extern to auto
if Funcdepth == 0 && dclcontext != PEXTERN {
@ -688,11 +668,9 @@ func funcargs(nt *Node) {
}
}
/*
* Same as funcargs, except run over an already constructed TFUNC.
* This happens during import, where the hidden_fndcl rule has
* used functype directly to parse the function's type.
*/
// Same as funcargs, except run over an already constructed TFUNC.
// This happens during import, where the hidden_fndcl rule has
// used functype directly to parse the function's type.
func funcargs2(t *Type) {
if t.Etype != TFUNC {
Fatalf("funcargs2 %v", t)
@ -735,11 +713,9 @@ func funcargs2(t *Type) {
}
}
/*
* finish the body.
* called in auto-declaration context.
* returns in extern-declaration context.
*/
// finish the body.
// called in auto-declaration context.
// returns in extern-declaration context.
func funcbody(n *Node) {
// change the declaration context from auto to extern
if dclcontext != PAUTO {
@ -754,9 +730,7 @@ func funcbody(n *Node) {
}
}
/*
* new type being defined with name s.
*/
// new type being defined with name s.
func typedcl0(s *Sym) *Node {
n := newname(s)
n.Op = OTYPE
@ -764,21 +738,17 @@ func typedcl0(s *Sym) *Node {
return n
}
/*
* node n, which was returned by typedcl0
* is being declared to have uncompiled type t.
* return the ODCLTYPE node to use.
*/
// node n, which was returned by typedcl0
// is being declared to have uncompiled type t.
// return the ODCLTYPE node to use.
func typedcl1(n *Node, t *Node, local bool) *Node {
n.Name.Param.Ntype = t
n.Local = local
return Nod(ODCLTYPE, n, nil)
}
/*
* structs, functions, and methods.
* they don't belong here, but where do they belong?
*/
// structs, functions, and methods.
// they don't belong here, but where do they belong?
func checkembeddedtype(t *Type) {
if t == nil {
return
@ -869,16 +839,21 @@ func checkdupfields(t *Type, what string) {
lineno = int32(lno)
}
/*
* convert a parsed id/type list into
* a type for struct/interface/arglist
*/
// convert a parsed id/type list into
// a type for struct/interface/arglist
func tostruct(l *NodeList) *Type {
var f *Type
t := typ(TSTRUCT)
tostruct0(t, l)
return t
}
func tostruct0(t *Type, l *NodeList) {
if t == nil || t.Etype != TSTRUCT {
Fatalf("struct expected")
}
for tp := &t.Type; l != nil; l = l.Next {
f = structfield(l.N)
f := structfield(l.N)
*tp = f
tp = &f.Down
@ -896,8 +871,6 @@ func tostruct(l *NodeList) *Type {
if !t.Broke {
checkwidth(t)
}
return t
}
func tofunargs(l *NodeList) *Type {
@ -910,7 +883,7 @@ func tofunargs(l *NodeList) *Type {
f = structfield(l.N)
f.Funarg = true
// esc.c needs to find f given a PPARAM to add the tag.
// esc.go needs to find f given a PPARAM to add the tag.
if l.N.Left != nil && l.N.Left.Class == PPARAM {
l.N.Left.Name.Param.Field = f
}
@ -996,18 +969,23 @@ func interfacefield(n *Node) *Type {
}
func tointerface(l *NodeList) *Type {
var f *Type
var t1 *Type
t := typ(TINTER)
tointerface0(t, l)
return t
}
func tointerface0(t *Type, l *NodeList) *Type {
if t == nil || t.Etype != TINTER {
Fatalf("interface expected")
}
tp := &t.Type
for ; l != nil; l = l.Next {
f = interfacefield(l.N)
f := interfacefield(l.N)
if l.N.Left == nil && f.Type.Etype == TINTER {
// embedded interface, inline methods
for t1 = f.Type.Type; t1 != nil; t1 = t1.Down {
for t1 := f.Type.Type; t1 != nil; t1 = t1.Down {
f = typ(TFIELD)
f.Type = t1.Type
f.Broke = t1.Broke
@ -1065,9 +1043,7 @@ func embedded(s *Sym, pkg *Pkg) *Node {
return n
}
/*
* check that the list of declarations is either all anonymous or all named
*/
// check that the list of declarations is either all anonymous or all named
func findtype(l *NodeList) *Node {
for ; l != nil; l = l.Next {
if l.N.Op == OKEY {
@ -1132,7 +1108,7 @@ func checkarglist(all *NodeList, input int) *NodeList {
// declarations, which are parsed by rules that don't
// use checkargs, but can happen for func literals in
// the inline bodies.
// TODO(rsc) this can go when typefmt case TFIELD in exportmode fmt.c prints _ instead of ?
// TODO(rsc) this can go when typefmt case TFIELD in exportmode fmt.go prints _ instead of ?
if importpkg != nil && n.Sym == nil {
n = nil
}
@ -1172,12 +1148,9 @@ func fakethis() *Node {
return n
}
/*
* Is this field a method on an interface?
* Those methods have an anonymous
* *struct{} as the receiver.
* (See fakethis above.)
*/
// Is this field a method on an interface?
// Those methods have an anonymous *struct{} as the receiver.
// (See fakethis above.)
func isifacemethod(f *Type) bool {
rcvr := getthisx(f).Type
if rcvr.Sym != nil {
@ -1194,12 +1167,17 @@ func isifacemethod(f *Type) bool {
return true
}
/*
* turn a parsed function declaration
* into a type
*/
// turn a parsed function declaration into a type
func functype(this *Node, in *NodeList, out *NodeList) *Type {
t := typ(TFUNC)
functype0(t, this, in, out)
return t
}
func functype0(t *Type, this *Node, in *NodeList, out *NodeList) {
if t == nil || t.Etype != TFUNC {
Fatalf("function type expected")
}
var rcvr *NodeList
if this != nil {
@ -1230,8 +1208,6 @@ func functype(this *Node, in *NodeList, out *NodeList) *Type {
t.Outnamed = true
}
}
return t
}
var methodsym_toppkg *Pkg
@ -1339,10 +1315,8 @@ func methodname1(n *Node, t *Node) *Node {
return n
}
/*
* add a method, declared as a function,
* n is fieldname, pa is base type, t is function type
*/
// add a method, declared as a function,
// n is fieldname, pa is base type, t is function type
func addmethod(sf *Sym, t *Type, local bool, nointerface bool) {
// get field sym
if sf == nil {
@ -1501,3 +1475,116 @@ func makefuncsym(s *Sym) {
s1.Def.Func.Shortname = newname(s)
funcsyms = append(funcsyms, s1.Def)
}
type nowritebarrierrecChecker struct {
curfn *Node
stable bool
// best maps from the ODCLFUNC of each visited function that
// recursively invokes a write barrier to the called function
// on the shortest path to a write barrier.
best map[*Node]nowritebarrierrecCall
}
type nowritebarrierrecCall struct {
target *Node
depth int
lineno int32
}
func checknowritebarrierrec() {
c := nowritebarrierrecChecker{
best: make(map[*Node]nowritebarrierrecCall),
}
visitBottomUp(xtop, func(list []*Node, recursive bool) {
// Functions with write barriers have depth 0.
for _, n := range list {
if n.Func.WBLineno != 0 {
c.best[n] = nowritebarrierrecCall{target: nil, depth: 0, lineno: n.Func.WBLineno}
}
}
// Propagate write barrier depth up from callees. In
// the recursive case, we have to update this at most
// len(list) times and can stop when an iteration
// doesn't change anything.
for _ = range list {
c.stable = false
for _, n := range list {
if n.Func.WBLineno == 0 {
c.curfn = n
c.visitcodelist(n.Nbody)
}
}
if c.stable {
break
}
}
// Check nowritebarrierrec functions.
for _, n := range list {
if !n.Func.Nowritebarrierrec {
continue
}
call, hasWB := c.best[n]
if !hasWB {
continue
}
// Build the error message in reverse.
err := ""
for call.target != nil {
err = fmt.Sprintf("\n\t%v: called by %v%s", Ctxt.Line(int(call.lineno)), n.Func.Nname, err)
n = call.target
call = c.best[n]
}
err = fmt.Sprintf("write barrier prohibited by caller; %v%s", n.Func.Nname, err)
yyerrorl(int(n.Func.WBLineno), err)
}
})
}
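The propagation loop above is a small fixed-point computation over the call graph: functions that contain a write barrier start at depth 0, and each caller records the callee on its shortest path until an iteration changes nothing. A minimal standalone sketch of that idea, using a plain string-keyed graph instead of compiler Nodes (all names and the example graph are invented for illustration):

package main

import "fmt"

type call struct {
    target string
    depth  int
}

func main() {
    // caller -> callees for an assumed example graph; "c" contains a write barrier.
    calls := map[string][]string{
        "a": {"b"},
        "b": {"c"},
        "c": {},
    }
    best := map[string]call{"c": {depth: 0}}
    for changed := true; changed; {
        changed = false
        for fn, callees := range calls {
            for _, callee := range callees {
                cb, ok := best[callee]
                if !ok {
                    continue
                }
                if b, seen := best[fn]; !seen || cb.depth+1 < b.depth {
                    best[fn] = call{target: callee, depth: cb.depth + 1}
                    changed = true
                }
            }
        }
    }
    fmt.Println(best["a"]) // {b 2}: "a" reaches a write barrier via b -> c
}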
func (c *nowritebarrierrecChecker) visitcodelist(l *NodeList) {
for ; l != nil; l = l.Next {
c.visitcode(l.N)
}
}
func (c *nowritebarrierrecChecker) visitcode(n *Node) {
if n == nil {
return
}
if n.Op == OCALLFUNC || n.Op == OCALLMETH {
c.visitcall(n)
}
c.visitcodelist(n.Ninit)
c.visitcode(n.Left)
c.visitcode(n.Right)
c.visitcodelist(n.List)
c.visitcodelist(n.Nbody)
c.visitcodelist(n.Rlist)
}
func (c *nowritebarrierrecChecker) visitcall(n *Node) {
fn := n.Left
if n.Op == OCALLMETH {
fn = n.Left.Right.Sym.Def
}
if fn == nil || fn.Op != ONAME || fn.Class != PFUNC || fn.Name.Defn == nil {
return
}
defn := fn.Name.Defn
fnbest, ok := c.best[defn]
if !ok {
return
}
best, ok := c.best[c.curfn]
if ok && fnbest.depth+1 >= best.depth {
return
}
c.best[c.curfn] = nowritebarrierrecCall{target: defn, depth: fnbest.depth + 1, lineno: n.Lineno}
c.stable = false
}


@ -7,6 +7,7 @@ package gc
import (
"cmd/internal/obj"
"fmt"
"strconv"
"strings"
)
@ -855,7 +856,7 @@ func esc(e *EscState, n *Node, up *Node) {
var v *Node
for ll := n.Func.Cvars; ll != nil; ll = ll.Next {
v = ll.N
if v.Op == OXXX { // unnamed out argument; see dcl.c:/^funcargs
if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs
continue
}
a = v.Name.Param.Closure
@ -1124,7 +1125,8 @@ func parsetag(note *string) uint16 {
if note == nil || !strings.HasPrefix(*note, "esc:") {
return EscUnknown
}
em := uint16(atoi((*note)[4:]))
n, _ := strconv.ParseInt((*note)[4:], 0, 0)
em := uint16(n)
if em == 0 {
return EscNone
}
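The change above moves the tag parsing to strconv.ParseInt with base 0, so both decimal and 0x-prefixed encodings of the escape bits are accepted. A self-contained sketch of the same parsing; parseTag and the sample tags are illustrative names, not the compiler's:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

func parseTag(note string) (uint16, bool) {
    if !strings.HasPrefix(note, "esc:") {
        return 0, false
    }
    n, err := strconv.ParseInt(note[4:], 0, 0) // base 0 accepts "18" as well as "0x12"
    if err != nil {
        return 0, false
    }
    return uint16(n), true
}

func main() {
    fmt.Println(parseTag("esc:0x12")) // 18 true
    fmt.Println(parseTag("esc:18"))   // 18 true
}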


@ -5,6 +5,7 @@
package gc
import (
"bytes"
"cmd/internal/obj"
"fmt"
"sort"
@ -12,6 +13,20 @@ import (
"unicode/utf8"
)
var (
newexport int // if set, use new export format
Debug_export int // if set, print debugging information about export data
exportsize int
)
func exportf(format string, args ...interface{}) {
n, _ := fmt.Fprintf(bout, format, args...)
exportsize += n
if Debug_export != 0 {
fmt.Printf(format, args...)
}
}
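exportf is a thin wrapper that tallies how many bytes went into the export section and, when Debug_export is set, mirrors them to stdout. The same pattern in miniature, with invented names and a bytes.Buffer standing in for bout:

package main

import (
    "bytes"
    "fmt"
    "os"
)

type exportWriter struct {
    out   *bytes.Buffer
    size  int
    debug bool
}

func (w *exportWriter) printf(format string, args ...interface{}) {
    n, _ := fmt.Fprintf(w.out, format, args...)
    w.size += n
    if w.debug { // echo to stdout only when debugging, as exportf does
        fmt.Fprintf(os.Stdout, format, args...)
    }
}

func main() {
    w := &exportWriter{out: new(bytes.Buffer)}
    w.printf("package %s\n", "demo")
    fmt.Println(w.size) // 13
}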
var asmlist *NodeList
// Mark n's symbol as exported
@ -35,8 +50,8 @@ func exportsym(n *Node) {
}
func exportname(s string) bool {
if s[0] < utf8.RuneSelf {
return 'A' <= s[0] && s[0] <= 'Z'
if r := s[0]; r < utf8.RuneSelf {
return 'A' <= r && r <= 'Z'
}
r, _ := utf8.DecodeRuneInString(s)
return unicode.IsUpper(r)
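The rewrite above only short-circuits the rune decoding for single-byte names; the answer is unchanged. A standalone version of the same exportedness check (exported is an illustrative name):

package main

import (
    "fmt"
    "unicode"
    "unicode/utf8"
)

func exported(name string) bool {
    r, _ := utf8.DecodeRuneInString(name)
    return unicode.IsUpper(r)
}

func main() {
    fmt.Println(exported("Foo"), exported("foo"), exported("Δx")) // true false true
}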
@ -57,7 +72,7 @@ func exportedsym(sym *Sym) bool {
return sym.Pkg == localpkg && exportname(sym.Name)
}
func autoexport(n *Node, ctxt uint8) {
func autoexport(n *Node, ctxt Class) {
if n == nil || n.Sym == nil {
return
}
@ -87,7 +102,7 @@ func dumppkg(p *Pkg) {
if !p.Direct {
suffix = " // indirect"
}
fmt.Fprintf(bout, "\timport %s %q%s\n", p.Name, p.Path, suffix)
exportf("\timport %s %q%s\n", p.Name, p.Path, suffix)
}
// Look for anything we need for the inline body
@ -128,7 +143,7 @@ func reexportdep(n *Node) {
}
}
// Local variables in the bodies need their type.
// Local variables in the bodies need their type.
case ODCL:
t := n.Left.Type
@ -167,7 +182,7 @@ func reexportdep(n *Node) {
exportlist = append(exportlist, n)
}
// for operations that need a type when rendered, put the type on the export list.
// for operations that need a type when rendered, put the type on the export list.
case OCONV,
OCONVIFACE,
OCONVNOP,
@ -216,9 +231,9 @@ func dumpexportconst(s *Sym) {
dumpexporttype(t)
if t != nil && !isideal(t) {
fmt.Fprintf(bout, "\tconst %v %v = %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp), Vconv(n.Val(), obj.FmtSharp))
exportf("\tconst %v %v = %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp), Vconv(n.Val(), obj.FmtSharp))
} else {
fmt.Fprintf(bout, "\tconst %v = %v\n", Sconv(s, obj.FmtSharp), Vconv(n.Val(), obj.FmtSharp))
exportf("\tconst %v = %v\n", Sconv(s, obj.FmtSharp), Vconv(n.Val(), obj.FmtSharp))
}
}
@ -242,14 +257,14 @@ func dumpexportvar(s *Sym) {
}
// NOTE: The space after %#S here is necessary for ld's export data parser.
fmt.Fprintf(bout, "\tfunc %v %v { %v }\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp), Hconv(n.Func.Inl, obj.FmtSharp))
exportf("\tfunc %v %v { %v }\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp), Hconv(n.Func.Inl, obj.FmtSharp))
reexportdeplist(n.Func.Inl)
} else {
fmt.Fprintf(bout, "\tfunc %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp))
exportf("\tfunc %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp))
}
} else {
fmt.Fprintf(bout, "\tvar %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp))
exportf("\tvar %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp))
}
}
@ -287,10 +302,10 @@ func dumpexporttype(t *Type) {
}
sort.Sort(methodbyname(m))
fmt.Fprintf(bout, "\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
exportf("\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
for _, f := range m {
if f.Nointerface {
fmt.Fprintf(bout, "\t//go:nointerface\n")
exportf("\t//go:nointerface\n")
}
if f.Type.Nname != nil && f.Type.Nname.Func.Inl != nil { // nname was set by caninl
@ -299,10 +314,10 @@ func dumpexporttype(t *Type) {
if Debug['l'] < 2 {
typecheckinl(f.Type.Nname)
}
fmt.Fprintf(bout, "\tfunc (%v) %v %v { %v }\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp), Hconv(f.Type.Nname.Func.Inl, obj.FmtSharp))
exportf("\tfunc (%v) %v %v { %v }\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp), Hconv(f.Type.Nname.Func.Inl, obj.FmtSharp))
reexportdeplist(f.Type.Nname.Func.Inl)
} else {
fmt.Fprintf(bout, "\tfunc (%v) %v %v\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp))
exportf("\tfunc (%v) %v %v\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp))
}
}
}
@ -341,44 +356,86 @@ func dumpsym(s *Sym) {
}
func dumpexport() {
lno := lineno
if buildid != "" {
fmt.Fprintf(bout, "build id %q\n", buildid)
exportf("build id %q\n", buildid)
}
fmt.Fprintf(bout, "\n$$\npackage %s", localpkg.Name)
if safemode != 0 {
fmt.Fprintf(bout, " safe")
}
fmt.Fprintf(bout, "\n")
for _, p := range pkgs {
if p.Direct {
dumppkg(p)
size := 0 // size of export section without enclosing markers
if forceNewExport || newexport != 0 {
// binary export
// The linker also looks for the $$ marker - use char after $$ to distinguish format.
exportf("\n$$B\n") // indicate binary format
const verifyExport = true // enable to check format changes
if verifyExport {
// save a copy of the export data
var copy bytes.Buffer
bcopy := obj.Binitw(&copy)
size = Export(bcopy, Debug_export != 0)
bcopy.Flush() // flushing to bytes.Buffer cannot fail
if n, err := bout.Write(copy.Bytes()); n != size || err != nil {
Fatalf("error writing export data: got %d bytes, want %d bytes, err = %v", n, size, err)
}
// export data must contain no '$' so that we can find the end by searching for "$$"
if bytes.IndexByte(copy.Bytes(), '$') >= 0 {
Fatalf("export data contains $")
}
// verify that we can read the copied export data back in
// (use empty package map to avoid collisions)
savedPkgMap := pkgMap
savedPkgs := pkgs
pkgMap = make(map[string]*Pkg)
pkgs = nil
importpkg = mkpkg("")
Import(obj.Binitr(&copy)) // must not die
importpkg = nil
pkgs = savedPkgs
pkgMap = savedPkgMap
} else {
size = Export(bout, Debug_export != 0)
}
exportf("\n$$\n")
} else {
// textual export
lno := lineno
exportf("\n$$\n") // indicate textual format
exportsize = 0
exportf("package %s", localpkg.Name)
if safemode != 0 {
exportf(" safe")
}
exportf("\n")
for _, p := range pkgs {
if p.Direct {
dumppkg(p)
}
}
// exportlist grows during iteration - cannot use range
for len(exportlist) > 0 {
n := exportlist[0]
exportlist = exportlist[1:]
lineno = n.Lineno
dumpsym(n.Sym)
}
size = exportsize
exportf("\n$$\n")
lineno = lno
}
// exportlist grows during iteration - cannot use range
for len(exportlist) > 0 {
n := exportlist[0]
exportlist = exportlist[1:]
lineno = n.Lineno
dumpsym(n.Sym)
if Debug_export != 0 {
fmt.Printf("export data size = %d bytes\n", size)
}
fmt.Fprintf(bout, "\n$$\n")
lineno = lno
}
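The verifyExport branch writes the binary export data to a side buffer, checks that the payload contains no '$' (so the closing "$$" marker stays unambiguous), and re-imports the copy before committing anything to the object file. A toy version of that write-check-reread discipline; the encoder here is invented, only the pattern matches:

package main

import (
    "bytes"
    "fmt"
    "log"
    "strings"
)

func encode(w *bytes.Buffer, decls []string) int {
    n, _ := w.WriteString(strings.Join(decls, ";"))
    return n
}

func main() {
    decls := []string{"func F()", "var V int"}

    var copyBuf bytes.Buffer
    size := encode(&copyBuf, decls)

    // The end-of-export marker is "$$", so the payload must not contain '$'.
    if bytes.IndexByte(copyBuf.Bytes(), '$') >= 0 {
        log.Fatal("export data contains $")
    }

    // Re-read the copy to make sure a consumer can decode what was just written.
    got := strings.Split(copyBuf.String(), ";")
    fmt.Println(size == copyBuf.Len(), len(got) == len(decls)) // true true
}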
/*
* import
*/
// import
/*
* return the sym for ss, which should match lexical
*/
func importsym(s *Sym, op int) *Sym {
if s.Def != nil && int(s.Def.Op) != op {
// return the sym for ss, which should match lexical
func importsym(s *Sym, op Op) *Sym {
if s.Def != nil && s.Def.Op != op {
pkgstr := fmt.Sprintf("during import %q", importpkg.Path)
redeclare(s, pkgstr)
}
@ -395,9 +452,7 @@ func importsym(s *Sym, op int) *Sym {
return s
}
/*
* return the type pkg.name, forward declaring if needed
*/
// return the type pkg.name, forward declaring if needed
func pkgtype(s *Sym) *Type {
importsym(s, OTYPE)
if s.Def == nil || s.Def.Op != OTYPE {


@ -47,9 +47,9 @@ import (
// Flags: those of %N
// ',' separate items with ',' instead of ';'
//
// In mparith1.c:
// %B Mpint* Big integers
// %F Mpflt* Big floats
// In mparith2.go and mparith3.go:
// %B Mpint* Big integers
// %F Mpflt* Big floats
//
// %S, %T and %N obey the following flags to set the format mode:
const (
@ -401,8 +401,8 @@ var etnames = []string{
}
// Fmt "%E": etype
func Econv(et int, flag int) string {
if et >= 0 && et < len(etnames) && etnames[et] != "" {
func Econv(et EType) string {
if int(et) < len(etnames) && etnames[et] != "" {
return etnames[et]
}
return fmt.Sprintf("E-%d", et)
@ -537,7 +537,7 @@ func typefmt(t *Type, flag int) string {
if fmtmode == FDbg {
fmtmode = 0
str := Econv(int(t.Etype), 0) + "-" + typefmt(t, flag)
str := Econv(t.Etype) + "-" + typefmt(t, flag)
fmtmode = FDbg
return str
}
@ -714,7 +714,7 @@ func typefmt(t *Type, flag int) string {
}
} else if fmtmode == FExp {
// TODO(rsc) this breaks on the eliding of unused arguments in the backend
// when this is fixed, the special case in dcl.c checkarglist can go.
// when this is fixed, the special case in dcl.go checkarglist can go.
//if(t->funarg)
// fmtstrcpy(fp, "_ ");
//else
@ -756,15 +756,15 @@ func typefmt(t *Type, flag int) string {
}
if fmtmode == FExp {
Fatalf("missing %v case during export", Econv(int(t.Etype), 0))
Fatalf("missing %v case during export", Econv(t.Etype))
}
// Don't know how to handle - fall back to detailed prints.
return fmt.Sprintf("%v <%v> %v", Econv(int(t.Etype), 0), t.Sym, t.Type)
return fmt.Sprintf("%v <%v> %v", Econv(t.Etype), t.Sym, t.Type)
}
// Statements which may be rendered with a simplestmt as init.
func stmtwithinit(op int) bool {
func stmtwithinit(op Op) bool {
switch op {
case OIF, OFOR, OSWITCH:
return true
@ -782,13 +782,13 @@ func stmtfmt(n *Node) string {
// block starting with the init statements.
// if we can just say "for" n->ninit; ... then do so
simpleinit := n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(int(n.Op))
simpleinit := n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(n.Op)
// otherwise, print the inits as separate statements
complexinit := n.Ninit != nil && !simpleinit && (fmtmode != FErr)
// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
extrablock := complexinit && stmtwithinit(int(n.Op))
extrablock := complexinit && stmtwithinit(n.Op)
if extrablock {
f += "{"
@ -817,7 +817,7 @@ func stmtfmt(n *Node) string {
f += Nconv(n.Right, 0)
}
// Don't export "v = <N>" initializing statements, hope they're always
// Don't export "v = <N>" initializing statements, hope they're always
// preceded by the DCL which will be re-parsed and typecheck to reproduce
// the "v = <N>" again.
case OAS, OASWB:
@ -833,7 +833,7 @@ func stmtfmt(n *Node) string {
case OASOP:
if n.Implicit {
if n.Etype == OADD {
if Op(n.Etype) == OADD {
f += fmt.Sprintf("%v++", n.Left)
} else {
f += fmt.Sprintf("%v--", n.Left)
@ -1128,7 +1128,7 @@ func exprfmt(n *Node, prec int) string {
return Vconv(n.Val(), 0)
// Special case: name used as local variable in export.
// Special case: name used as local variable in export.
// _ becomes ~b%d internally; print as _ for export
case ONAME:
if (fmtmode == FExp || fmtmode == FErr) && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
@ -1150,7 +1150,6 @@ func exprfmt(n *Node, prec int) string {
}
fallthrough
//fallthrough
case OPACK, ONONAME:
return Sconv(n.Sym, 0)
@ -1444,6 +1443,7 @@ func exprfmt(n *Node, prec int) string {
case OCMPSTR, OCMPIFACE:
var f string
f += exprfmt(n.Left, nprec)
// TODO(marvin): Fix Node.EType type union.
f += fmt.Sprintf(" %v ", Oconv(int(n.Etype), obj.FmtSharp))
f += exprfmt(n.Right, nprec+1)
return f


@ -9,10 +9,8 @@ import (
"fmt"
)
/*
* portable half of code generator.
* mainly statements and control flow.
*/
// portable half of code generator.
// mainly statements and control flow.
var labellist *Label
var lastlabel *Label
@ -213,18 +211,14 @@ func stmtlabel(n *Node) *Label {
return nil
}
/*
* compile statements
*/
// compile statements
func Genlist(l *NodeList) {
for ; l != nil; l = l.Next {
gen(l.N)
}
}
/*
* generate code to start new proc running call n.
*/
// generate code to start new proc running call n.
func cgen_proc(n *Node, proc int) {
switch n.Left.Op {
default:
@ -241,11 +235,9 @@ func cgen_proc(n *Node, proc int) {
}
}
/*
* generate declaration.
* have to allocate heap copy
* for escaped variables.
*/
// generate declaration.
// have to allocate heap copy
// for escaped variables.
func cgen_dcl(n *Node) {
if Debug['g'] != 0 {
Dump("\ncgen-dcl", n)
@ -267,9 +259,7 @@ func cgen_dcl(n *Node) {
Cgen_as(n.Name.Heapaddr, prealloc[n])
}
/*
* generate discard of value
*/
// generate discard of value
func cgen_discard(nr *Node) {
if nr == nil {
return
@ -324,9 +314,7 @@ func cgen_discard(nr *Node) {
}
}
/*
* clearslim generates code to zero a slim node.
*/
// clearslim generates code to zero a slim node.
func Clearslim(n *Node) {
var z Node
z.Op = OLITERAL
@ -369,17 +357,13 @@ func Clearslim(n *Node) {
Cgen(&z, n)
}
/*
* generate:
* res = iface{typ, data}
* n->left is typ
* n->right is data
*/
// generate:
// res = iface{typ, data}
// n->left is typ
// n->right is data
func Cgen_eface(n *Node, res *Node) {
/*
* the right node of an eface may contain function calls that uses res as an argument,
* so it's important that it is done first
*/
// the right node of an eface may contain function calls that use res as an argument,
// so it's important that it is evaluated first
tmp := temp(Types[Tptr])
Cgen(n.Right, tmp)
@ -395,13 +379,11 @@ func Cgen_eface(n *Node, res *Node) {
Cgen(n.Left, &dst)
}
/*
* generate one of:
* res, resok = x.(T)
* res = x.(T) (when resok == nil)
* n.Left is x
* n.Type is T
*/
// generate one of:
// res, resok = x.(T)
// res = x.(T) (when resok == nil)
// n.Left is x
// n.Type is T
func cgen_dottype(n *Node, res, resok *Node, wb bool) {
if Debug_typeassert > 0 {
Warn("type assertion inlined")
@ -487,12 +469,10 @@ func cgen_dottype(n *Node, res, resok *Node, wb bool) {
}
}
/*
* generate:
* res, resok = x.(T)
* n.Left is x
* n.Type is T
*/
// generate:
// res, resok = x.(T)
// n.Left is x
// n.Type is T
func Cgen_As2dottype(n, res, resok *Node) {
if Debug_typeassert > 0 {
Warn("type assertion inlined")
@ -551,11 +531,9 @@ func Cgen_As2dottype(n, res, resok *Node) {
Patch(q, Pc)
}
/*
* gather series of offsets
* >=0 is direct addressed field
* <0 is pointer to next field (+1)
*/
// gather series of offsets
// >=0 is direct addressed field
// <0 is pointer to next field (+1)
func Dotoffset(n *Node, oary []int64, nn **Node) int {
var i int
@ -604,9 +582,7 @@ func Dotoffset(n *Node, oary []int64, nn **Node) int {
return i
}
/*
* make a new off the books
*/
// make a new temporary, off the books
func Tempname(nn *Node, t *Type) {
if Curfn == nil {
Fatalf("no curfn for tempname")
@ -1038,7 +1014,7 @@ func componentgen_wb(nr, nl *Node, wb bool) bool {
numPtr := 0
visitComponents(nl.Type, 0, func(t *Type, offset int64) bool {
n++
if int(Simtype[t.Etype]) == Tptr && t != itable {
if Simtype[t.Etype] == Tptr && t != itable {
numPtr++
}
return n <= maxMoves && (!wb || numPtr <= 1)
@ -1155,7 +1131,7 @@ func componentgen_wb(nr, nl *Node, wb bool) bool {
ptrOffset int64
)
visitComponents(nl.Type, 0, func(t *Type, offset int64) bool {
if wb && int(Simtype[t.Etype]) == Tptr && t != itable {
if wb && Simtype[t.Etype] == Tptr && t != itable {
if ptrType != nil {
Fatalf("componentgen_wb %v", Tconv(nl.Type, 0))
}


@ -10,8 +10,6 @@ import (
"cmd/internal/obj"
)
// avoid <ctype.h>
// The parser's maximum stack size.
// We have to use a #define macro here since yacc
// or bison will check for its definition and use
@ -95,7 +93,7 @@ type Val struct {
type NilVal struct{}
func (v Val) Ctype() int {
func (v Val) Ctype() Ctype {
switch x := v.U.(type) {
default:
Fatalf("unexpected Ctype for %T", v.U)
@ -153,7 +151,7 @@ type Sym struct {
}
type Type struct {
Etype uint8
Etype EType
Nointerface bool
Noalg bool
Chan uint8
@ -260,6 +258,8 @@ type Iter struct {
T *Type
}
type EType uint8
const (
Txxx = iota
@ -288,7 +288,7 @@ const (
TFUNC
TARRAY
T_old_DARRAY
T_old_DARRAY // Doesn't seem to be used in existing code. Used now for Isddd export (see bexport.go). TODO(gri) rename.
TSTRUCT
TCHAN
TMAP
@ -312,8 +312,11 @@ const (
NTYPE
)
// Ctype describes the constant kind of an "ideal" (untyped) constant.
type Ctype int8
const (
CTxxx = iota
CTxxx Ctype = iota
CTINT
CTRUNE
@ -325,27 +328,31 @@ const (
)
const (
/* types of channel */
/* must match ../../pkg/nreflect/type.go:/Chandir */
// types of channel
// must match ../../pkg/nreflect/type.go:/Chandir
Cxxx = 0
Crecv = 1 << 0
Csend = 1 << 1
Cboth = Crecv | Csend
)
// declaration context
// The Class of a variable/function describes the "storage class"
// of a variable or function. During parsing, storage classes are
// called declaration contexts.
type Class uint8
const (
Pxxx = uint8(iota)
PEXTERN // global variable
PAUTO // local variables
PPARAM // input arguments
PPARAMOUT // output results
PPARAMREF // closure variable reference
PFUNC // global function
Pxxx Class = iota
PEXTERN // global variable
PAUTO // local variables
PPARAM // input arguments
PPARAMOUT // output results
PPARAMREF // closure variable reference
PFUNC // global function
PDISCARD // discard during parse of duplicate import
PHEAP = uint8(1 << 7) // an extra bit to identify an escaped variable
PHEAP = 1 << 7 // an extra bit to identify an escaped variable
)
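Because PHEAP is a flag bit rather than another enumerator, storage classes are tested with the bit masked off (class &^ PHEAP, as in checkparam later in this patch) and the escape bit with class & PHEAP. A small sketch with the constants restated locally; the values are assumed to mirror the declaration above:

package main

import "fmt"

type Class uint8

const (
    Pxxx Class = iota
    PEXTERN
    PAUTO
    PPARAM
    PHEAP Class = 1 << 7 // flag bit, not an ordinal class
)

func main() {
    c := PAUTO | PHEAP                           // an escaped local variable
    fmt.Println(c&^PHEAP == PAUTO, c&PHEAP != 0) // true true
}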
const (
@ -364,8 +371,8 @@ const (
type Typedef struct {
Name string
Etype int
Sameas int
Etype EType
Sameas EType
}
type Sig struct {
@ -399,10 +406,8 @@ type Idir struct {
dir string
}
/*
* argument passing to/from
* smagic and umagic
*/
// argument passing to/from
// smagic and umagic
type Magic struct {
W int // input for both - width
S int // output for both - shift
@ -418,17 +423,15 @@ type Magic struct {
Ua int // output - adder
}
/*
* note this is the runtime representation
* of the compilers arrays.
*
* typedef struct
* { // must not move anything
* uchar array[8]; // pointer to data
* uchar nel[4]; // number of elements
* uchar cap[4]; // allocated number of elements
* } Array;
*/
// note this is the runtime representation
// of the compiler's arrays.
//
// typedef struct
// { // must not move anything
// uchar array[8]; // pointer to data
// uchar nel[4]; // number of elements
// uchar cap[4]; // allocated number of elements
// } Array;
var Array_array int // runtime offsetof(Array,array) - same for String
var Array_nel int // runtime offsetof(Array,nel) - same for String
@ -437,16 +440,14 @@ var Array_cap int // runtime offsetof(Array,cap)
var sizeof_Array int // runtime sizeof(Array)
/*
* note this is the runtime representation
* of the compilers strings.
*
* typedef struct
* { // must not move anything
* uchar array[8]; // pointer to data
* uchar nel[4]; // number of elements
* } String;
*/
// note this is the runtime representation
// of the compiler's strings.
//
// typedef struct
// { // must not move anything
// uchar array[8]; // pointer to data
// uchar nel[4]; // number of elements
// } String;
var sizeof_String int // runtime sizeof(String)
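Purely for illustration, the two layouts described above written out as Go structs, using the fixed-size byte arrays exactly as the comments state them; the compiler records the real offsets in Array_array, Array_nel, Array_cap and sizeof_String at set-up time, and these types are not part of the compiler:

package main

import (
    "fmt"
    "unsafe"
)

type arrayRepr struct { // the "Array" layout from the comment
    array [8]byte // pointer to data
    nel   [4]byte // number of elements
    cap   [4]byte // allocated number of elements
}

type stringRepr struct { // the "String" layout from the comment
    array [8]byte
    nel   [4]byte
}

func main() {
    fmt.Println(unsafe.Offsetof(arrayRepr{}.nel), unsafe.Offsetof(arrayRepr{}.cap)) // 8 12
    fmt.Println(unsafe.Sizeof(arrayRepr{}), unsafe.Sizeof(stringRepr{}))            // 16 12
}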
var dotlist [10]Dlist // size is max depth of embeddeds
@ -483,8 +484,7 @@ var nolocalimports int
var lexbuf bytes.Buffer
var strbuf bytes.Buffer
var litbuf string
var litbuf string // LLITERAL value for use in syntax error messages
var Debug [256]int
@ -511,6 +511,8 @@ var Runtimepkg *Pkg // package runtime
var racepkg *Pkg // package runtime/race
var msanpkg *Pkg // package runtime/msan
var typepkg *Pkg // fake package for runtime type info (headers)
var typelinkpkg *Pkg // fake package for runtime type info (data)
@ -521,7 +523,7 @@ var unsafepkg *Pkg // package unsafe
var trackpkg *Pkg // fake package for field tracking
var Tptr int // either TPTR32 or TPTR64
var Tptr EType // either TPTR32 or TPTR64
var myimportpath string
@ -543,7 +545,7 @@ var runetype *Type
var errortype *Type
var Simtype [NTYPE]uint8
var Simtype [NTYPE]EType
var (
Isptr [NTYPE]bool
@ -591,7 +593,7 @@ var importlist []*Node // imported functions and methods with inlinable bodies
var funcsyms []*Node
var dclcontext uint8 // PEXTERN/PAUTO
var dclcontext Class // PEXTERN/PAUTO
var incannedimport int
@ -625,7 +627,7 @@ var Widthint int
var Widthreg int
var typesw *Node
var typesw *Node // TODO(gri) remove when yacc-based parser is gone
var nblank *Node
@ -645,15 +647,23 @@ var flag_installsuffix string
var flag_race int
var flag_msan int
var flag_largemodel int
// Whether we are adding any sort of code instrumentation, such as
// when the race detector is enabled.
var instrumenting bool
// Pending annotations for next func declaration.
var (
noescape bool
nosplit bool
nowritebarrier bool
systemstack bool
norace bool
noescape bool
noinline bool
norace bool
nosplit bool
nowritebarrier bool
nowritebarrierrec bool
systemstack bool
)
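These flags are set by //go:... comment pragmas scanned in getlinepragma and consumed by the next function declaration. At a declaration site the directives look like this (editor's illustration; the write-barrier directives are accepted only inside the runtime, as the checks in the lexer enforce):

package main

import "fmt"

//go:noinline
func add(a, b int) int { return a + b }

//go:nosplit
func touch() {}

func main() {
    touch()
    fmt.Println(add(1, 2)) // 3, but the call is never inlined
}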
var debuglive int
@ -706,9 +716,7 @@ type Graph struct {
Rpo []*Flow
}
/*
* interface to back end
*/
// interface to back end
const (
// Pseudo-op, like TEXT, GLOBL, TYPE, PCDATA, FUNCDATA.
@ -786,14 +794,14 @@ type Arch struct {
Bgen_float func(*Node, bool, int, *obj.Prog) // optional
Cgen64 func(*Node, *Node) // only on 32-bit systems
Cgenindex func(*Node, *Node, bool) *obj.Prog
Cgen_bmul func(int, *Node, *Node, *Node) bool
Cgen_bmul func(Op, *Node, *Node, *Node) bool
Cgen_float func(*Node, *Node) // optional
Cgen_hmul func(*Node, *Node, *Node)
Cgen_shift func(int, bool, *Node, *Node, *Node)
Cgen_shift func(Op, bool, *Node, *Node, *Node)
Clearfat func(*Node)
Cmp64 func(*Node, *Node, int, int, *obj.Prog) // only on 32-bit systems
Cmp64 func(*Node, *Node, Op, int, *obj.Prog) // only on 32-bit systems
Defframe func(*obj.Prog)
Dodiv func(int, *Node, *Node, *Node)
Dodiv func(Op, *Node, *Node, *Node)
Excise func(*Flow)
Expandchecks func(*obj.Prog)
Getg func(*Node)
@ -809,7 +817,7 @@ type Arch struct {
// function calls needed during the evaluation, and on 32-bit systems
// the values are guaranteed not to be 64-bit values, so no in-memory
// temporaries are necessary.
Ginscmp func(op int, t *Type, n1, n2 *Node, likely int) *obj.Prog
Ginscmp func(op Op, t *Type, n1, n2 *Node, likely int) *obj.Prog
// Ginsboolval inserts instructions to convert the result
// of a just-completed comparison to a boolean value.
@ -838,7 +846,7 @@ type Arch struct {
FtoB func(int) uint64
BtoR func(uint64) int
BtoF func(uint64) int
Optoas func(int, *Type) int
Optoas func(Op, *Type) int
Doregbits func(int) uint64
Regnames func(*int) []string
Use387 bool // should 8g use 387 FP instructions instead of sse2.


@ -254,6 +254,7 @@ import_stmt:
break;
}
if my.Name == "init" {
lineno = int32($1)
Yyerror("cannot import package as init - init must be a func");
break;
}
@ -315,7 +316,9 @@ import_package:
} else if importpkg.Name != $2.Name {
Yyerror("conflicting names %s and %s for package %q", importpkg.Name, $2.Name, importpkg.Path);
}
importpkg.Direct = true;
if incannedimport == 0 {
importpkg.Direct = true;
}
importpkg.Safe = curio.importsafe
if safemode != 0 && !curio.importsafe {
@ -487,7 +490,7 @@ simple_stmt:
| expr LASOP expr
{
$$ = Nod(OASOP, $1, $3);
$$.Etype = uint8($2); // rathole to pass opcode
$$.Etype = EType($2); // rathole to pass opcode
}
| expr_list '=' expr_list
{
@ -510,7 +513,7 @@ simple_stmt:
}
if $1.Next != nil {
Yyerror("argument count mismatch: %d = %d", count($1), 1);
} else if ($1.N.Op != ONAME && $1.N.Op != OTYPE && $1.N.Op != ONONAME) || isblank($1.N) {
} else if ($1.N.Op != ONAME && $1.N.Op != OTYPE && $1.N.Op != ONONAME && ($1.N.Op != OLITERAL || $1.N.Name == nil)) || isblank($1.N) {
Yyerror("invalid variable name %s in type switch", $1.N);
} else {
$$.Left = dclname($1.N.Sym);
@ -523,13 +526,15 @@ simple_stmt:
{
$$ = Nod(OASOP, $1, Nodintconst(1));
$$.Implicit = true;
$$.Etype = OADD;
// TODO(marvin): Fix Node.EType type union.
$$.Etype = EType(OADD);
}
| expr LDEC
{
$$ = Nod(OASOP, $1, Nodintconst(1));
$$.Implicit = true;
$$.Etype = OSUB;
// TODO(marvin): Fix Node.EType type union.
$$.Etype = EType(OSUB);
}
case:
@ -1392,7 +1397,9 @@ xfndcl:
$$.Noescape = noescape;
$$.Func.Norace = norace;
$$.Func.Nosplit = nosplit;
$$.Func.Noinline = noinline;
$$.Func.Nowritebarrier = nowritebarrier;
$$.Func.Nowritebarrierrec = nowritebarrierrec;
$$.Func.Systemstack = systemstack;
funcbody($$);
}
@ -1578,11 +1585,13 @@ xdcl_list:
if nsyntaxerrors == 0 {
testdclstack();
}
nointerface = false
noescape = false
noinline = false
nointerface = false
norace = false
nosplit = false
nowritebarrier = false
nowritebarrierrec = false
systemstack = false
}


@ -43,9 +43,7 @@ var dfirst *obj.Prog
var dpc *obj.Prog
/*
* Is this node a memory operand?
*/
// Is this node a memory operand?
func Ismem(n *Node) bool {
switch n.Op {
case OITAB,
@ -85,7 +83,7 @@ func Gbranch(as int, t *Type, likely int) *obj.Prog {
p := Prog(as)
p.To.Type = obj.TYPE_BRANCH
p.To.Val = nil
if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' {
if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' && Thearch.Thechar != '0' {
p.From.Type = obj.TYPE_CONST
if likely > 0 {
p.From.Offset = 1
@ -337,7 +335,7 @@ func Naddr(a *obj.Addr, n *Node) {
// n->left is PHEAP ONAME for stack parameter.
// compute address of actual parameter on stack.
case OPARAM:
a.Etype = Simtype[n.Left.Type.Etype]
a.Etype = uint8(Simtype[n.Left.Type.Etype])
a.Width = n.Left.Type.Width
a.Offset = n.Xoffset
@ -362,7 +360,7 @@ func Naddr(a *obj.Addr, n *Node) {
case ONAME:
a.Etype = 0
if n.Type != nil {
a.Etype = Simtype[n.Type.Etype]
a.Etype = uint8(Simtype[n.Type.Etype])
}
a.Offset = n.Xoffset
s := n.Sym
@ -406,6 +404,17 @@ func Naddr(a *obj.Addr, n *Node) {
a.Sym = Linksym(s)
case ODOT:
// A special case to make write barriers more efficient.
// Taking the address of the first field of a named struct
// is the same as taking the address of the struct.
if n.Left.Type.Etype != TSTRUCT || n.Left.Type.Type.Sym != n.Right.Sym {
Debug['h'] = 1
Dump("naddr", n)
Fatalf("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
}
Naddr(a, n.Left)
case OLITERAL:
if Thearch.Thechar == '8' {
a.Width = 0
@ -440,7 +449,7 @@ func Naddr(a *obj.Addr, n *Node) {
case OADDR:
Naddr(a, n.Left)
a.Etype = uint8(Tptr)
if Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
if Thearch.Thechar != '0' && Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
a.Width = int64(Widthptr)
}
if a.Type != obj.TYPE_MEM {
@ -466,7 +475,7 @@ func Naddr(a *obj.Addr, n *Node) {
if a.Type == obj.TYPE_CONST && a.Offset == 0 {
break // ptr(nil)
}
a.Etype = Simtype[Tptr]
a.Etype = uint8(Simtype[Tptr])
a.Offset += int64(Array_array)
a.Width = int64(Widthptr)
@ -477,7 +486,7 @@ func Naddr(a *obj.Addr, n *Node) {
if a.Type == obj.TYPE_CONST && a.Offset == 0 {
break // len(nil)
}
a.Etype = Simtype[TUINT]
a.Etype = uint8(Simtype[TUINT])
a.Offset += int64(Array_nel)
if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
a.Width = int64(Widthint)
@ -490,7 +499,7 @@ func Naddr(a *obj.Addr, n *Node) {
if a.Type == obj.TYPE_CONST && a.Offset == 0 {
break // cap(nil)
}
a.Etype = Simtype[TUINT]
a.Etype = uint8(Simtype[TUINT])
a.Offset += int64(Array_cap)
if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
a.Width = int64(Widthint)
@ -675,16 +684,14 @@ func Anyregalloc() bool {
return n > len(Thearch.ReservedRegs)
}
/*
* allocate register of type t, leave in n.
* if o != N, o may be reusable register.
* caller must Regfree(n).
*/
// allocate register of type t, leave in n.
// if o != N, o may be reusable register.
// caller must Regfree(n).
func Regalloc(n *Node, t *Type, o *Node) {
if t == nil {
Fatalf("regalloc: t nil")
}
et := int(Simtype[t.Etype])
et := Simtype[t.Etype]
if Ctxt.Arch.Regsize == 4 && (et == TINT64 || et == TUINT64) {
Fatalf("regalloc 64bit")
}


@ -17,13 +17,11 @@ package gc
// a->offset += v;
// break;
/*
* a function named init is a special case.
* it is called by the initialization before
* main is run. to make it unique within a
* package and also uncallable, the name,
* normally "pkg.init", is altered to "pkg.init.1".
*/
// a function named init is a special case.
// it is called by the initialization before
// main is run. to make it unique within a
// package and also uncallable, the name,
// normally "pkg.init", is altered to "pkg.init.1".
var renameinit_initgen int
@ -32,24 +30,22 @@ func renameinit() *Sym {
return Lookupf("init.%d", renameinit_initgen)
}
/*
* hand-craft the following initialization code
* var initdone· uint8 (1)
* func init() (2)
* if initdone· != 0 { (3)
* if initdone· == 2 (4)
* return
* throw(); (5)
* }
* initdone· = 1; (6)
* // over all matching imported symbols
* <pkg>.init() (7)
* { <init stmts> } (8)
* init.<n>() // if any (9)
* initdone· = 2; (10)
* return (11)
* }
*/
// hand-craft the following initialization code
// var initdone· uint8 (1)
// func init() (2)
// if initdone· != 0 { (3)
// if initdone· == 2 (4)
// return
// throw(); (5)
// }
// initdone· = 1; (6)
// // over all matching imported symbols
// <pkg>.init() (7)
// { <init stmts> } (8)
// init.<n>() // if any (9)
// initdone· = 2; (10)
// return (11)
// }
func anyinit(n *NodeList) bool {
// are there any interesting init statements
for l := n; l != nil; l = l.Next {


@ -106,6 +106,11 @@ func caninl(fn *Node) {
Fatalf("caninl no nname %v", Nconv(fn, obj.FmtSign))
}
// If marked "go:noinline", don't inline
if fn.Func.Noinline {
return
}
// If fn has no body (is defined outside of Go), cannot inline it.
if fn.Nbody == nil {
return
@ -124,13 +129,13 @@ func caninl(fn *Node) {
}
}
// Runtime package must not be race instrumented.
// Racewalk skips runtime package. However, some runtime code can be
// Runtime package must not be instrumented.
// Instrument skips runtime package. However, some runtime code can be
// inlined into other packages and instrumented there. To avoid this,
// we disable inlining of runtime functions in race mode.
// we disable inlining of runtime functions when instrumenting.
// The example that we observed is inlining of LockOSThread,
// which lead to false race reports on m contents.
if flag_race != 0 && myimportpath == "runtime" {
if instrumenting && myimportpath == "runtime" {
return
}
@ -345,7 +350,8 @@ func inlnode(np **Node) {
case ODEFER, OPROC:
switch n.Left.Op {
case OCALLFUNC, OCALLMETH:
n.Left.Etype = n.Op
// TODO(marvin): Fix Node.EType type union.
n.Left.Etype = EType(n.Op)
}
fallthrough
@ -445,7 +451,8 @@ func inlnode(np **Node) {
// switch at the top of this function.
switch n.Op {
case OCALLFUNC, OCALLMETH:
if n.Etype == OPROC || n.Etype == ODEFER {
// TODO(marvin): Fix Node.EType type union.
if n.Etype == EType(OPROC) || n.Etype == EType(ODEFER) {
return
}
}


@ -58,6 +58,7 @@ var debugtab = []struct {
{"slice", &Debug_slice}, // print information about slice compilation
{"typeassert", &Debug_typeassert}, // print information about type assertion inlining
{"wb", &Debug_wb}, // print information about write barriers
{"export", &Debug_export}, // print export data
}
const (
@ -200,6 +201,9 @@ func Main() {
obj.Flagcount("l", "disable inlining", &Debug['l'])
obj.Flagcount("live", "debug liveness analysis", &debuglive)
obj.Flagcount("m", "print optimization decisions", &Debug['m'])
obj.Flagcount("msan", "build code compatible with C/C++ memory sanitizer", &flag_msan)
obj.Flagcount("newexport", "use new export format", &newexport) // TODO(gri) remove eventually (issue 13241)
obj.Flagcount("oldparser", "use old parser", &oldparser) // TODO(gri) remove eventually (issue 13240)
obj.Flagcount("nolocalimports", "reject local (relative) imports", &nolocalimports)
obj.Flagstr("o", "write output to `file`", &outfile)
obj.Flagstr("p", "set expected package import `path`", &myimportpath)
@ -217,11 +221,15 @@ func Main() {
obj.Flagcount("y", "debug declarations in canned imports (with -d)", &Debug['y'])
var flag_shared int
var flag_dynlink bool
if Thearch.Thechar == '6' || Thearch.Thechar == '5' {
switch Thearch.Thechar {
case '5', '6', '7', '9':
obj.Flagcount("shared", "generate code that can be linked into a shared library", &flag_shared)
}
if Thearch.Thechar == '6' {
obj.Flagcount("largemodel", "generate code that assumes a large memory model", &flag_largemodel)
}
switch Thearch.Thechar {
case '5', '6', '7', '8', '9':
flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries")
}
obj.Flagstr("cpuprofile", "write cpu profile to `file`", &cpuprofile)
@ -248,6 +256,15 @@ func Main() {
racepkg = mkpkg("runtime/race")
racepkg.Name = "race"
}
if flag_msan != 0 {
msanpkg = mkpkg("runtime/msan")
msanpkg.Name = "msan"
}
if flag_race != 0 && flag_msan != 0 {
log.Fatal("can not use both -race and -msan")
} else if flag_race != 0 || flag_msan != 0 {
instrumenting = true
}
// parse -d argument
if debugstr != "" {
@ -301,7 +318,19 @@ func Main() {
lexlineno = 1
const BOM = 0xFEFF
// Uncomment the line below to temporarily switch the compiler back
// to the yacc-based parser. Short-term work-around for issues with
// the new recursive-descent parser for which setting -oldparser is
// not sufficient.
// TODO(gri) remove this eventually (issue 13240)
//
// oldparser = 1
for _, infile = range flag.Args() {
if trace && Debug['x'] != 0 && oldparser == 0 {
fmt.Printf("--- %s ---\n", infile)
}
linehistpush(infile)
curio.infile = infile
@ -463,6 +492,10 @@ func Main() {
fninit(xtop)
}
if compiling_runtime != 0 {
checknowritebarrierrec()
}
// Phase 9: Check external declarations.
for i, n := range externdcl {
if n.Op == ONAME {
@ -521,7 +554,7 @@ func arsize(b *obj.Biobuf, name string) int {
}
func skiptopkgdef(b *obj.Biobuf) bool {
/* archive header */
// archive header
p := obj.Brdline(b, '\n')
if p == "" {
return false
@ -533,7 +566,7 @@ func skiptopkgdef(b *obj.Biobuf) bool {
return false
}
/* symbol table may be first; skip it */
// symbol table may be first; skip it
sz := arsize(b, "__.GOSYMDEF")
if sz >= 0 {
@ -542,7 +575,7 @@ func skiptopkgdef(b *obj.Biobuf) bool {
obj.Bseek(b, 8, 0)
}
/* package export block is next */
// package export block is next
sz = arsize(b, "__.PKGDEF")
if sz <= 0 {
@ -620,6 +653,9 @@ func findpkg(name string) (file string, ok bool) {
} else if flag_race != 0 {
suffixsep = "_"
suffix = "race"
} else if flag_msan != 0 {
suffixsep = "_"
suffix = "msan"
}
file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.a", goroot, goos, goarch, suffixsep, suffix, name)
@ -640,6 +676,7 @@ func fakeimport() {
cannedimports("fake.o", "$$\n")
}
// TODO(gri) line argument doesn't appear to be used
func importfile(f *Val, line int) {
if _, ok := f.U.(string); !ok {
Yyerror("import statement not a string")
@ -771,42 +808,69 @@ func importfile(f *Val, line int) {
// so don't record the full path.
linehistpragma(file[len(file)-len(path_)-2:]) // acts as #pragma lib
/*
* position the input right
* after $$ and return
*/
pushedio = curio
curio.bin = imp
curio.peekc = 0
curio.peekc1 = 0
curio.infile = file
curio.nlsemi = false
typecheckok = true
// In the importfile, if we find:
// $$\n (old format): position the input right after $$\n and return
// $$B\n (new format): import directly, then feed the lexer a dummy statement
// look for $$
var c int
for {
c := getc()
if c == EOF {
c = obj.Bgetc(imp)
if c < 0 {
break
}
if c != '$' {
continue
if c == '$' {
c = obj.Bgetc(imp)
if c == '$' || c < 0 {
break
}
}
c = getc()
if c == EOF {
break
}
if c != '$' {
continue
}
return
}
Yyerror("no import in %q", f.U.(string))
unimportfile()
// get character after $$
if c >= 0 {
c = obj.Bgetc(imp)
}
switch c {
case '\n':
// old export format
pushedio = curio
curio.bin = imp
curio.peekc = 0
curio.peekc1 = 0
curio.infile = file
curio.nlsemi = false
typecheckok = true
if oldparser == 0 {
push_parser()
}
case 'B':
// new export format
obj.Bgetc(imp) // skip \n after $$B
Import(imp)
// continue as if the package was imported before (see above)
tag := ""
if importpkg.Safe {
tag = "safe"
}
p := fmt.Sprintf("package %s %s\n$$\n", importpkg.Name, tag)
cannedimports(file, p)
default:
Yyerror("no import in %q", f.U.(string))
}
}
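The loop above scans the imported package data for the "$$" marker and dispatches on the character that follows: a newline means the old textual export format, 'B' the new binary one. The same scan on a plain byte stream, as a sketch with invented names and sample input:

package main

import (
    "bufio"
    "fmt"
    "strings"
)

func exportFormat(r *bufio.Reader) string {
    for {
        c, err := r.ReadByte()
        if err != nil {
            return "no export data"
        }
        if c != '$' {
            continue
        }
        c, err = r.ReadByte()
        if err != nil {
            return "no export data"
        }
        if c != '$' {
            continue
        }
        // found "$$": the next byte selects the format
        switch c, _ := r.ReadByte(); c {
        case '\n':
            return "textual"
        case 'B':
            return "binary"
        default:
            return "unknown"
        }
    }
}

func main() {
    in := bufio.NewReader(strings.NewReader("!<arch>\nheader junk\n$$B\n<binary data>"))
    fmt.Println(exportFormat(in)) // binary
}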
func unimportfile() {
if oldparser == 0 {
pop_parser()
}
if curio.bin != nil {
obj.Bterm(curio.bin)
curio.bin = nil
@ -836,6 +900,10 @@ func cannedimports(file string, cp string) {
typecheckok = true
incannedimport = 1
if oldparser == 0 {
push_parser()
}
}
func isSpace(c int) bool {
@ -909,10 +977,10 @@ l0:
goto l0
}
lineno = lexlineno /* start of token */
lineno = lexlineno // start of token
if c >= utf8.RuneSelf {
/* all multibyte runes are alpha */
// all multibyte runes are alpha
cp = &lexbuf
cp.Reset()
@ -1036,7 +1104,7 @@ l0:
c1 = '.'
}
/* "..." */
// "..."
case '"':
lexbuf.Reset()
lexbuf.WriteString(`"<string>"`)
@ -1057,7 +1125,7 @@ l0:
goto strlit
/* `...` */
// `...`
case '`':
lexbuf.Reset()
lexbuf.WriteString("`<string>`")
@ -1083,7 +1151,7 @@ l0:
goto strlit
/* '.' */
// '.'
case '\'':
if escchar('\'', &escflag, &v) {
Yyerror("empty character literal or unescaped ' in character literal")
@ -1148,14 +1216,14 @@ l0:
}
if c1 == '=' {
c = ODIV
c = int(ODIV)
goto asop
}
case ':':
c1 = getc()
if c1 == '=' {
c = LCOLAS
c = int(LCOLAS)
yylval.i = int(lexlineno)
goto lx
}
@ -1163,48 +1231,48 @@ l0:
case '*':
c1 = getc()
if c1 == '=' {
c = OMUL
c = int(OMUL)
goto asop
}
case '%':
c1 = getc()
if c1 == '=' {
c = OMOD
c = int(OMOD)
goto asop
}
case '+':
c1 = getc()
if c1 == '+' {
c = LINC
c = int(LINC)
goto lx
}
if c1 == '=' {
c = OADD
c = int(OADD)
goto asop
}
case '-':
c1 = getc()
if c1 == '-' {
c = LDEC
c = int(LDEC)
goto lx
}
if c1 == '=' {
c = OSUB
c = int(OSUB)
goto asop
}
case '>':
c1 = getc()
if c1 == '>' {
c = LRSH
c = int(LRSH)
c1 = getc()
if c1 == '=' {
c = ORSH
c = int(ORSH)
goto asop
}
@ -1212,19 +1280,19 @@ l0:
}
if c1 == '=' {
c = LGE
c = int(LGE)
goto lx
}
c = LGT
c = int(LGT)
case '<':
c1 = getc()
if c1 == '<' {
c = LLSH
c = int(LLSH)
c1 = getc()
if c1 == '=' {
c = OLSH
c = int(OLSH)
goto asop
}
@ -1232,43 +1300,43 @@ l0:
}
if c1 == '=' {
c = LLE
c = int(LLE)
goto lx
}
if c1 == '-' {
c = LCOMM
c = int(LCOMM)
goto lx
}
c = LLT
c = int(LLT)
case '=':
c1 = getc()
if c1 == '=' {
c = LEQ
c = int(LEQ)
goto lx
}
case '!':
c1 = getc()
if c1 == '=' {
c = LNE
c = int(LNE)
goto lx
}
case '&':
c1 = getc()
if c1 == '&' {
c = LANDAND
c = int(LANDAND)
goto lx
}
if c1 == '^' {
c = LANDNOT
c = int(LANDNOT)
c1 = getc()
if c1 == '=' {
c = OANDNOT
c = int(OANDNOT)
goto asop
}
@ -1276,49 +1344,49 @@ l0:
}
if c1 == '=' {
c = OAND
c = int(OAND)
goto asop
}
case '|':
c1 = getc()
if c1 == '|' {
c = LOROR
c = int(LOROR)
goto lx
}
if c1 == '=' {
c = OOR
c = int(OOR)
goto asop
}
case '^':
c1 = getc()
if c1 == '=' {
c = OXOR
c = int(OXOR)
goto asop
}
/*
* clumsy dance:
* to implement rule that disallows
* if T{1}[0] { ... }
* but allows
* if (T{1}[0]) { ... }
* the block bodies for if/for/switch/select
* begin with an LBODY token, not '{'.
*
* when we see the keyword, the next
* non-parenthesized '{' becomes an LBODY.
* loophack is normally false.
* a keyword sets it to true.
* parens push loophack onto a stack and go back to false.
* a '{' with loophack == true becomes LBODY and disables loophack.
*
* i said it was clumsy.
*/
// clumsy dance:
// to implement rule that disallows
// if T{1}[0] { ... }
// but allows
// if (T{1}[0]) { ... }
// the block bodies for if/for/switch/select
// begin with an LBODY token, not '{'.
//
// when we see the keyword, the next
// non-parenthesized '{' becomes an LBODY.
// loophack is normally false.
// a keyword sets it to true.
// parens push loophack onto a stack and go back to false.
// a '{' with loophack == true becomes LBODY and disables loophack.
//
// I said it was clumsy.
//
// We only need the loophack when running with -oldparser.
case '(', '[':
if loophack || _yylex_lstk != nil {
if oldparser != 0 && (loophack || _yylex_lstk != nil) {
h = new(Loophack)
if h == nil {
Flusherrors()
@ -1335,7 +1403,7 @@ l0:
goto lx
case ')', ']':
if _yylex_lstk != nil {
if oldparser != 0 && _yylex_lstk != nil {
h = _yylex_lstk
loophack = h.v
_yylex_lstk = h.next
@ -1344,7 +1412,7 @@ l0:
goto lx
case '{':
if loophack {
if oldparser != 0 && loophack {
if Debug['x'] != 0 {
fmt.Printf("%v lex: LBODY\n", Ctxt.Line(int(lexlineno)))
}
@ -1389,10 +1457,8 @@ asop:
}
return LASOP
/*
* cp is set to lexbuf and some
* prefix has been stored
*/
// cp is set to lexbuf and some
// prefix has been stored
talph:
for {
if c >= utf8.RuneSelf {
@ -1403,6 +1469,9 @@ talph:
if !unicode.IsLetter(r) && !unicode.IsDigit(r) && (importpkg == nil || r != 0xb7) {
Yyerror("invalid identifier character U+%04x", r)
}
if cp.Len() == 0 && unicode.IsDigit(r) {
Yyerror("identifier cannot begin with digit U+%04x", r)
}
cp.WriteRune(r)
} else if !isAlnum(c) && c != '_' {
break
@ -1421,7 +1490,9 @@ talph:
goto l0
case LFOR, LIF, LSWITCH, LSELECT:
loophack = true // see comment about loophack above
if oldparser != 0 {
loophack = true // see comment about loophack above
}
}
if Debug['x'] != 0 {
@ -1500,7 +1571,7 @@ casei:
mpatoflt(&yylval.val.U.(*Mpcplx).Imag, str)
if yylval.val.U.(*Mpcplx).Imag.Val.IsInf() {
Yyerror("overflow in imaginary constant")
Mpmovecflt(&yylval.val.U.(*Mpcplx).Real, 0.0)
Mpmovecflt(&yylval.val.U.(*Mpcplx).Imag, 0.0)
}
if Debug['x'] != 0 {
@ -1557,12 +1628,10 @@ func more(pp *string) bool {
return p != ""
}
/*
* read and interpret syntax that looks like
* //line parse.y:15
* as a discontinuity in sequential line numbers.
* the next line of input comes from parse.y:15
*/
// read and interpret syntax that looks like
// //line parse.y:15
// as a discontinuity in sequential line numbers.
// the next line of input comes from parse.y:15
func getlinepragma() int {
var cmd, verb, name string
@ -1629,6 +1698,11 @@ func getlinepragma() int {
return c
}
if verb == "go:noinline" {
noinline = true
return c
}
if verb == "go:systemstack" {
if compiling_runtime == 0 {
Yyerror("//go:systemstack only allowed in runtime")
@ -1644,6 +1718,15 @@ func getlinepragma() int {
nowritebarrier = true
return c
}
if verb == "go:nowritebarrierrec" {
if compiling_runtime == 0 {
Yyerror("//go:nowritebarrierrec only allowed in runtime")
}
nowritebarrierrec = true
nowritebarrier = true // Implies nowritebarrier
return c
}
return c
}
if c != 'l' {
@ -1851,13 +1934,18 @@ func (yy) Error(msg string) {
Yyerror("%s", msg)
}
var oldparser int // if set, theparser is used (otherwise we use the recursive-descent parser)
var theparser yyParser
var parsing bool
func yyparse() {
theparser = yyNewParser()
parsing = true
theparser.Parse(yy{})
if oldparser != 0 {
theparser = yyNewParser()
theparser.Parse(yy{})
} else {
parse_file()
}
parsing = false
}
@ -2121,10 +2209,10 @@ hex:
var syms = []struct {
name string
lexical int
etype int
op int
etype EType
op Op
}{
/* basic types */
// basic types
{"int8", LNAME, TINT8, OXXX},
{"int16", LNAME, TINT16, OXXX},
{"int32", LNAME, TINT32, OXXX},
@ -2195,7 +2283,7 @@ func lexinit() {
s1.Lexical = uint16(lex)
if etype := s.etype; etype != Txxx {
if etype < 0 || etype >= len(Types) {
if int(etype) >= len(Types) {
Fatalf("lexinit: %s bad etype", s.name)
}
s2 := Pkglookup(s.name, builtinpkg)
@ -2216,12 +2304,13 @@ func lexinit() {
continue
}
// TODO(marvin): Fix Node.EType type union.
if etype := s.op; etype != OXXX {
s2 := Pkglookup(s.name, builtinpkg)
s2.Lexical = LNAME
s2.Def = Nod(ONAME, nil, nil)
s2.Def.Sym = s2
s2.Def.Etype = uint8(etype)
s2.Def.Etype = EType(etype)
}
}
@ -2330,38 +2419,34 @@ func lexinit1() {
}
func lexfini() {
var s *Sym
var lex int
var etype int
var i int
for i = 0; i < len(syms); i++ {
lex = syms[i].lexical
for i := range syms {
lex := syms[i].lexical
if lex != LNAME {
continue
}
s = Lookup(syms[i].name)
s := Lookup(syms[i].name)
s.Lexical = uint16(lex)
etype = syms[i].etype
etype := syms[i].etype
if etype != Txxx && (etype != TANY || Debug['A'] != 0) && s.Def == nil {
s.Def = typenod(Types[etype])
s.Def.Name = new(Name)
s.Origpkg = builtinpkg
}
etype = syms[i].op
if etype != OXXX && s.Def == nil {
// TODO(marvin): Fix Node.EType type union.
etype = EType(syms[i].op)
if etype != EType(OXXX) && s.Def == nil {
s.Def = Nod(ONAME, nil, nil)
s.Def.Sym = s
s.Def.Etype = uint8(etype)
s.Def.Etype = etype
s.Origpkg = builtinpkg
}
}
// backend-specific builtin types (e.g. int).
for i = range Thearch.Typedefs {
s = Lookup(Thearch.Typedefs[i].Name)
for i := range Thearch.Typedefs {
s := Lookup(Thearch.Typedefs[i].Name)
if s.Def == nil {
s.Def = typenod(Types[Thearch.Typedefs[i].Etype])
s.Def.Name = new(Name)
@ -2371,30 +2456,25 @@ func lexfini() {
// there's only so much table-driven we can handle.
// these are special cases.
s = Lookup("byte")
if s.Def == nil {
if s := Lookup("byte"); s.Def == nil {
s.Def = typenod(bytetype)
s.Def.Name = new(Name)
s.Origpkg = builtinpkg
}
s = Lookup("error")
if s.Def == nil {
if s := Lookup("error"); s.Def == nil {
s.Def = typenod(errortype)
s.Def.Name = new(Name)
s.Origpkg = builtinpkg
}
s = Lookup("rune")
if s.Def == nil {
if s := Lookup("rune"); s.Def == nil {
s.Def = typenod(runetype)
s.Def.Name = new(Name)
s.Origpkg = builtinpkg
}
s = Lookup("nil")
if s.Def == nil {
if s := Lookup("nil"); s.Def == nil {
var v Val
v.U = new(NilVal)
s.Def = nodlit(v)
@ -2403,23 +2483,20 @@ func lexfini() {
s.Origpkg = builtinpkg
}
s = Lookup("iota")
if s.Def == nil {
if s := Lookup("iota"); s.Def == nil {
s.Def = Nod(OIOTA, nil, nil)
s.Def.Sym = s
s.Origpkg = builtinpkg
}
s = Lookup("true")
if s.Def == nil {
if s := Lookup("true"); s.Def == nil {
s.Def = Nodbool(true)
s.Def.Sym = s
s.Def.Name = new(Name)
s.Origpkg = builtinpkg
}
s = Lookup("false")
if s.Def == nil {
if s := Lookup("false"); s.Def == nil {
s.Def = Nodbool(false)
s.Def.Sym = s
s.Def.Name = new(Name)


@ -4,8 +4,9 @@
// +build ignore
// Generate builtin.go from $* (runtime.go and unsafe.go).
// Run this after changing runtime.go and unsafe.go
// Generate builtin.go from builtin/runtime.go and builtin/unsafe.go
// (passed as arguments on the command line by a go:generate comment).
// Run this after changing builtin/runtime.go and builtin/unsafe.go
// or after changing the export metadata format in the compiler.
// Either way, you need to have a working compiler binary first.
package main


@ -113,7 +113,7 @@ func mpgetflt(a *Mpflt) float64 {
Yyerror("mpgetflt ovf")
}
return x
return x + 0 // avoid -0 (should not be needed, but be conservative)
}
func mpgetflt32(a *Mpflt) float64 {
@ -125,7 +125,7 @@ func mpgetflt32(a *Mpflt) float64 {
Yyerror("mpgetflt32 ovf")
}
return x
return x + 0 // avoid -0 (should not be needed, but be conservative)
}
func Mpmovecflt(a *Mpflt, c float64) {
@ -133,6 +133,10 @@ func Mpmovecflt(a *Mpflt, c float64) {
fmt.Printf("\nconst %g", c)
}
// convert -0 to 0
if c == 0 {
c = 0
}
a.Val.SetFloat64(c)
if Mpdebug {
@ -141,7 +145,10 @@ func Mpmovecflt(a *Mpflt, c float64) {
}
func mpnegflt(a *Mpflt) {
a.Val.Neg(&a.Val)
// avoid -0
if a.Val.Sign() != 0 {
a.Val.Neg(&a.Val)
}
}
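The guards above exist because math/big happily produces a negative zero, which the compiler does not want leaking into constant arithmetic. A short demonstration of the behavior being guarded against:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    zero := new(big.Float)          // +0
    neg := new(big.Float).Neg(zero) // -0: Sign() is 0, but the sign bit is set
    fmt.Println(neg.Sign() == 0, neg.Signbit()) // true true
}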
//
@ -163,15 +170,20 @@ func mpatoflt(a *Mpflt, as string) {
// - decimal point and binary point in constant
// TODO(gri) use different conversion function or check separately
Yyerror("malformed constant: %s", as)
a.Val.SetUint64(0)
a.Val.SetFloat64(0)
return
}
if f.IsInf() {
Yyerror("constant too large: %s", as)
a.Val.SetUint64(0)
a.Val.SetFloat64(0)
return
}
// -0 becomes 0
if f.Sign() == 0 && f.Signbit() {
a.Val.SetFloat64(0)
}
}
func (f *Mpflt) String() string {
@ -188,13 +200,18 @@ func Fconv(fvp *Mpflt, flag int) string {
// determine sign
f := &fvp.Val
var sign string
if fvp.Val.Signbit() {
if f.Sign() < 0 {
sign = "-"
f = new(big.Float).Abs(f)
} else if flag&obj.FmtSign != 0 {
sign = "+"
}
// Don't try to convert infinities (will not terminate).
if f.IsInf() {
return sign + "Inf"
}
// Use fmt formatting if in float64 range (common case).
if x, _ := f.Float64(); !math.IsInf(x, 0) {
return fmt.Sprintf("%s%.6g", sign, x)


@ -10,9 +10,7 @@ import (
"strconv"
)
/*
* architecture-independent object file output
*/
// architecture-independent object file output
const (
ArhdrSize = 60
)
@ -279,7 +277,7 @@ func Datastring(s string, a *obj.Addr) {
a.Sym = Linksym(symdata)
a.Node = symdata.Def
a.Offset = 0
a.Etype = Simtype[TINT]
a.Etype = uint8(Simtype[TINT])
}
func datagostring(sval string, a *obj.Addr) {
@ -289,7 +287,7 @@ func datagostring(sval string, a *obj.Addr) {
a.Sym = Linksym(symhdr)
a.Node = symhdr.Def
a.Offset = 0
a.Etype = TSTRING
a.Etype = uint8(TSTRING)
}
func dgostringptr(s *Sym, off int, str string) int {
@ -314,7 +312,7 @@ func dgostrlitptr(s *Sym, off int, lit *string) int {
p.From3.Offset = int64(Widthptr)
datagostring(*lit, &p.To)
p.To.Type = obj.TYPE_ADDR
p.To.Etype = Simtype[TINT]
p.To.Etype = uint8(Simtype[TINT])
off += Widthptr
return off
@ -375,8 +373,8 @@ func gdata(nam *Node, nr *Node, wid int) {
}
func gdatacomplex(nam *Node, cval *Mpcplx) {
w := cplxsubtype(int(nam.Type.Etype))
w = int(Types[w].Width)
cst := cplxsubtype(nam.Type.Etype)
w := int(Types[cst].Width)
p := Thearch.Gins(obj.ADATA, nam, nil)
p.From3 = new(obj.Addr)


@ -396,7 +396,7 @@ func ordercall(n *Node, order *Order) {
// contain m or k. They are usually unnecessary, but in the unnecessary
// cases they are also typically registerizable, so not much harm done.
// And this only applies to the multiple-assignment form.
// We could do a more precise analysis if needed, like in walk.c.
// We could do a more precise analysis if needed, like in walk.go.
//
// Ordermapassign also inserts these temporaries if needed for
// calling writebarrierfat with a pointer to n->right.
@ -408,7 +408,7 @@ func ordermapassign(n *Node, order *Order) {
case OAS:
order.out = list(order.out, n)
// We call writebarrierfat only for values > 4 pointers long. See walk.c.
// We call writebarrierfat only for values > 4 pointers long. See walk.go.
if (n.Left.Op == OINDEXMAP || (needwritebarrier(n.Left, n.Right) && n.Left.Type.Width > int64(4*Widthptr))) && !isaddrokay(n.Right) {
m := n.Left
n.Left = ordertemp(m.Type, order, false)
@ -434,7 +434,7 @@ func ordermapassign(n *Node, order *Order) {
a = Nod(OAS, m, l.N)
typecheck(&a, Etop)
post = list(post, a)
} else if flag_race != 0 && n.Op == OAS2FUNC && !isblank(l.N) {
} else if instrumenting && n.Op == OAS2FUNC && !isblank(l.N) {
m = l.N
l.N = ordertemp(m.Type, order, false)
a = Nod(OAS, m, l.N)
@ -509,7 +509,8 @@ func orderstmt(n *Node, order *Order) {
tmp1.Etype = 0 // now an rvalue not an lvalue
}
tmp1 = ordercopyexpr(tmp1, n.Left.Type, order, 0)
n.Right = Nod(int(n.Etype), tmp1, n.Right)
// TODO(marvin): Fix Node.EType type union.
n.Right = Nod(Op(n.Etype), tmp1, n.Right)
typecheck(&n.Right, Erv)
orderexpr(&n.Right, order, nil)
n.Etype = 0
@ -756,7 +757,7 @@ func orderstmt(n *Node, order *Order) {
ordercallargs(&n.List, order)
order.out = list(order.out, n)
// Special: clean case temporaries in each block entry.
// Special: clean case temporaries in each block entry.
// Select must enter one of its blocks, so there is no
// need for a cleaning at the end.
// Doubly special: evaluation order for select is stricter
@ -1093,7 +1094,7 @@ func orderexpr(np **Node, order *Order, lhs *Node) {
OREAL,
ORECOVER:
ordercall(n, order)
if lhs == nil || lhs.Op != ONAME || flag_race != 0 {
if lhs == nil || lhs.Op != ONAME || instrumenting {
n = ordercopyexpr(n, n.Type, order, 0)
}
@ -1153,7 +1154,7 @@ func orderexpr(np **Node, order *Order, lhs *Node) {
// TODO(rsc): The Isfat is for consistency with componentgen and walkexpr.
// It needs to be removed in all three places.
// That would allow inlining x.(struct{*int}) the same as x.(*int).
if !isdirectiface(n.Type) || Isfat(n.Type) || flag_race != 0 {
if !isdirectiface(n.Type) || Isfat(n.Type) || instrumenting {
n = ordercopyexpr(n, n.Type, order, 1)
}



@ -130,6 +130,9 @@ func gcsymdup(s *Sym) {
}
func emitptrargsmap() {
if Curfn.Func.Nname.Sym.Name == "_" {
return
}
sym := Lookup(fmt.Sprintf("%s.args_stackmap", Curfn.Func.Nname.Sym.Name))
nptr := int(Curfn.Type.Argwid / int64(Widthptr))
@ -283,7 +286,7 @@ func allocauto(ptxt *obj.Prog) {
if haspointers(n.Type) {
stkptrsize = Stksize
}
if Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
Stksize = Rnd(Stksize, int64(Widthptr))
}
if Stksize >= 1<<31 {
@ -320,7 +323,7 @@ func Cgen_checknil(n *Node) {
Fatalf("bad checknil")
}
if ((Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
if ((Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
var reg Node
Regalloc(&reg, Types[Tptr], n)
Cgen(n, &reg)
@ -406,8 +409,8 @@ func compile(fn *Node) {
if nerrors != 0 {
goto ret
}
if flag_race != 0 {
racewalk(Curfn)
if instrumenting {
instrument(Curfn)
}
if nerrors != 0 {
goto ret


@ -821,7 +821,7 @@ func checkparam(fn *Node, p *obj.Prog, n *Node) {
return
}
var a *Node
var class uint8
var class Class
for l := fn.Func.Dcl; l != nil; l = l.Next {
a = l.N
class = a.Class &^ PHEAP
@ -1434,7 +1434,14 @@ func livenessepilogue(lv *Liveness) {
// the PCDATA must begin one instruction early too.
// The instruction before a call to deferreturn is always a
// no-op, to keep PC-specific data unambiguous.
splicebefore(lv, bb, newpcdataprog(p.Opt.(*obj.Prog), pos), p.Opt.(*obj.Prog))
prev := p.Opt.(*obj.Prog)
if Ctxt.Arch.Thechar == '9' {
// On ppc64 there is an additional instruction
// (another no-op or reload of toc pointer) before
// the call.
prev = prev.Opt.(*obj.Prog)
}
splicebefore(lv, bb, newpcdataprog(prev, pos), prev)
} else {
splicebefore(lv, bb, newpcdataprog(p, pos), p)
}


@ -88,7 +88,7 @@ func Noreturn(p *obj.Prog) bool {
// longer and more difficult to follow during debugging.
// Remove them.
/* what instruction does a JMP to p eventually land on? */
// what instruction does a JMP to p eventually land on?
func chasejmp(p *obj.Prog, jmploop *int) *obj.Prog {
n := 0
for p != nil && p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH {
@ -104,14 +104,12 @@ func chasejmp(p *obj.Prog, jmploop *int) *obj.Prog {
return p
}
/*
* reuse reg pointer for mark/sweep state.
* leave reg==nil at end because alive==nil.
*/
// reuse reg pointer for mark/sweep state.
// leave reg==nil at end because alive==nil.
var alive interface{} = nil
var dead interface{} = 1
/* mark all code reachable from firstp as alive */
// mark all code reachable from firstp as alive
func mark(firstp *obj.Prog) {
for p := firstp; p != nil; p = p.Link {
if p.Opt != dead {
@ -335,21 +333,19 @@ func Flowend(graph *Graph) {
}
}
/*
* find looping structure
*
* 1) find reverse postordering
* 2) find approximate dominators,
* the actual dominators if the flow graph is reducible
* otherwise, dominators plus some other non-dominators.
* See Matthew S. Hecht and Jeffrey D. Ullman,
* "Analysis of a Simple Algorithm for Global Data Flow Problems",
* Conf. Record of ACM Symp. on Principles of Prog. Langs, Boston, Massachusetts,
* Oct. 1-3, 1973, pp. 207-217.
* 3) find all nodes with a predecessor dominated by the current node.
* such a node is a loop head.
* recursively, all preds with a greater rpo number are in the loop
*/
// find looping structure
//
// 1) find reverse postordering
// 2) find approximate dominators,
// the actual dominators if the flow graph is reducible
// otherwise, dominators plus some other non-dominators.
// See Matthew S. Hecht and Jeffrey D. Ullman,
// "Analysis of a Simple Algorithm for Global Data Flow Problems",
// Conf. Record of ACM Symp. on Principles of Prog. Langs, Boston, Massachusetts,
// Oct. 1-3, 1973, pp. 207-217.
// 3) find all nodes with a predecessor dominated by the current node.
// such a node is a loop head.
// recursively, all preds with a greater rpo number are in the loop
func postorder(r *Flow, rpo2r []*Flow, n int32) int32 {
r.Rpo = 1
r1 := r.S1
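The algorithm itself is unchanged by this cleanup, but as a concrete reminder of what step 1 and the loop-head test amount to, here is a self-contained sketch on a made-up five-node graph: it computes a reverse postordering and marks the targets of DFS back edges as loop heads. For reducible flow graphs that coincides with the dominator-based definition above; irreducible graphs need the full Hecht–Ullman approximation.

package main

import "fmt"

// A tiny CFG as adjacency lists; node 0 is the entry.
var succ = [][]int{
	0: {1},
	1: {2, 4},
	2: {3},
	3: {1}, // back edge 3 -> 1 makes 1 a loop head
	4: {},
}

var (
	rpo   []int // reverse postorder, built as postorder and reversed
	color []int // 0 = unvisited, 1 = on DFS stack, 2 = finished
	heads = map[int]bool{}
)

func dfs(n int) {
	color[n] = 1
	for _, s := range succ[n] {
		switch color[s] {
		case 0:
			dfs(s)
		case 1:
			// Edge into a node still on the DFS stack: a back edge,
			// so (for a reducible graph) s is a loop head.
			heads[s] = true
		}
	}
	color[n] = 2
	rpo = append(rpo, n)
}

func main() {
	color = make([]int, len(succ))
	dfs(0)
	for i, j := 0, len(rpo)-1; i < j; i, j = i+1, j-1 {
		rpo[i], rpo[j] = rpo[j], rpo[i]
	}
	fmt.Println("reverse postorder:", rpo)
	fmt.Println("loop heads:", heads)
}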
@ -903,7 +899,7 @@ func nilopt(firstp *obj.Prog) {
return
}
if Debug_checknil > 1 { /* || strcmp(curfn->nname->sym->name, "f1") == 0 */
if Debug_checknil > 1 { // || strcmp(curfn->nname->sym->name, "f1") == 0
Dumpit("nilopt", g.Start, 0)
}


@ -9,13 +9,20 @@ import (
"strings"
)
// The racewalk pass modifies the code tree for the function as follows:
// The instrument pass modifies the code tree for instrumentation.
//
// For flag_race it modifies the function as follows:
//
// 1. It inserts a call to racefuncenterfp at the beginning of each function.
// 2. It inserts a call to racefuncexit at the end of each function.
// 3. It inserts a call to raceread before each memory read.
// 4. It inserts a call to racewrite before each memory write.
//
// For flag_msan:
//
// 1. It inserts a call to msanread before each memory read.
// 2. It inserts a call to msanwrite before each memory write.
//
// The rewriting is not yet complete. Certain nodes are not rewritten
// but should be.
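For orientation, here is a rough, hand-written sketch of the shape this pass gives a function under flag_race. The hook functions are local stand-ins defined for the example, not the real runtime entry points (those have different signatures and cannot be called from user code); the msan case is analogous, with msanread/msanwrite taking an address and a length.

package main

import (
	"fmt"
	"unsafe"
)

// Stand-in stubs for the runtime hooks the compiler calls; the real
// functions live in the runtime and have different signatures.
func racefuncenterfp(fp unsafe.Pointer) { fmt.Println("racefuncenterfp") }
func racefuncexit()                     { fmt.Println("racefuncexit") }
func racewrite(addr unsafe.Pointer)     { fmt.Printf("racewrite %p\n", addr) }

var g int

// store is roughly what a plain `func store(x int) { g = x }` looks like
// after race instrumentation: enter/exit calls bracket the body and the
// memory write gets a racewrite hook before it.
func store(x int) {
	racefuncenterfp(unsafe.Pointer(&x)) // stand-in for the real frame-pointer argument
	defer racefuncexit()                // the compiler appends this to the exit path rather than deferring
	racewrite(unsafe.Pointer(&g))
	g = x
}

func main() {
	store(42)
}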
@ -24,11 +31,11 @@ import (
// Do not instrument the following packages at all,
// at best instrumentation would cause infinite recursion.
var omit_pkgs = []string{"runtime", "runtime/race"}
var omit_pkgs = []string{"runtime/internal/atomic", "runtime/internal/sys", "runtime", "runtime/race", "runtime/msan"}
// Only insert racefuncenterfp/racefuncexit into the following packages.
// Memory accesses in the packages are either uninteresting or will cause false positives.
var noinst_pkgs = []string{"sync", "sync/atomic"}
var norace_inst_pkgs = []string{"sync", "sync/atomic"}
func ispkgin(pkgs []string) bool {
if myimportpath != "" {
@ -42,35 +49,35 @@ func ispkgin(pkgs []string) bool {
return false
}
// TODO(rsc): Remove. Put //go:norace on forkAndExecInChild instead.
func isforkfunc(fn *Node) bool {
// Special case for syscall.forkAndExecInChild.
// In the child, this function must not acquire any locks, because
// they might have been locked at the time of the fork. This means
// no rescheduling, no malloc calls, and no new stack segments.
// Race instrumentation does all of the above.
return myimportpath != "" && myimportpath == "syscall" && fn.Func.Nname.Sym.Name == "forkAndExecInChild"
}
func racewalk(fn *Node) {
if ispkgin(omit_pkgs) || isforkfunc(fn) || fn.Func.Norace {
func instrument(fn *Node) {
if ispkgin(omit_pkgs) || fn.Func.Norace {
return
}
if !ispkgin(noinst_pkgs) {
racewalklist(fn.Nbody, nil)
if flag_race == 0 || !ispkgin(norace_inst_pkgs) {
instrumentlist(fn.Nbody, nil)
// nothing interesting for race detector in fn->enter
racewalklist(fn.Func.Exit, nil)
instrumentlist(fn.Func.Exit, nil)
}
nd := mkcall("racefuncenterfp", nil, nil, Nod(OADDR, nodfp, nil))
fn.Func.Enter = concat(list1(nd), fn.Func.Enter)
nd = mkcall("racefuncexit", nil, nil)
fn.Func.Exit = list(fn.Func.Exit, nd)
if flag_race != 0 {
// nodpc is the PC of the caller as extracted by
// getcallerpc. We use -widthptr(FP) for x86.
// BUG: this will not work on arm.
nodpc := Nod(OXXX, nil, nil)
*nodpc = *nodfp
nodpc.Type = Types[TUINTPTR]
nodpc.Xoffset = int64(-Widthptr)
nd := mkcall("racefuncenter", nil, nil, nodpc)
fn.Func.Enter = concat(list1(nd), fn.Func.Enter)
nd = mkcall("racefuncexit", nil, nil)
fn.Func.Exit = list(fn.Func.Exit, nd)
}
if Debug['W'] != 0 {
s := fmt.Sprintf("after racewalk %v", fn.Func.Nname.Sym)
s := fmt.Sprintf("after instrument %v", fn.Func.Nname.Sym)
dumplist(s, fn.Nbody)
s = fmt.Sprintf("enter %v", fn.Func.Nname.Sym)
dumplist(s, fn.Func.Enter)
@ -79,12 +86,12 @@ func racewalk(fn *Node) {
}
}
func racewalklist(l *NodeList, init **NodeList) {
func instrumentlist(l *NodeList, init **NodeList) {
var instr *NodeList
for ; l != nil; l = l.Next {
instr = nil
racewalknode(&l.N, &instr, 0, 0)
instrumentnode(&l.N, &instr, 0, 0)
if init == nil {
l.N.Ninit = concat(l.N.Ninit, instr)
} else {
@ -96,7 +103,7 @@ func racewalklist(l *NodeList, init **NodeList) {
// walkexpr and walkstmt combined
// walks the tree and adds calls to the
// instrumentation code to top-level (statement) nodes' init
func racewalknode(np **Node, init **NodeList, wr int, skip int) {
func instrumentnode(np **Node, init **NodeList, wr int, skip int) {
n := *np
if n == nil {
@ -104,35 +111,35 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
}
if Debug['w'] > 1 {
Dump("racewalk-before", n)
Dump("instrument-before", n)
}
setlineno(n)
if init == nil {
Fatalf("racewalk: bad init list")
Fatalf("instrument: bad init list")
}
if init == &n.Ninit {
// If init == &n->ninit and n->ninit is non-nil,
// racewalknode might append it to itself.
// instrumentnode might append it to itself.
// nil it out and handle it separately before putting it back.
l := n.Ninit
n.Ninit = nil
racewalklist(l, nil)
racewalknode(&n, &l, wr, skip) // recurse with nil n->ninit
instrumentlist(l, nil)
instrumentnode(&n, &l, wr, skip) // recurse with nil n->ninit
appendinit(&n, l)
*np = n
return
}
racewalklist(n.Ninit, nil)
instrumentlist(n.Ninit, nil)
switch n.Op {
default:
Fatalf("racewalk: unknown node type %v", Oconv(int(n.Op), 0))
Fatalf("instrument: unknown node type %v", Oconv(int(n.Op), 0))
case OAS, OASWB, OAS2FUNC:
racewalknode(&n.Left, init, 1, 0)
racewalknode(&n.Right, init, 0, 0)
instrumentnode(&n.Left, init, 1, 0)
instrumentnode(&n.Right, init, 0, 0)
goto ret
// can't matter
@ -144,7 +151,7 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
for l := n.List; l != nil; l = l.Next {
switch l.N.Op {
case OCALLFUNC, OCALLMETH, OCALLINTER:
racewalknode(&l.N, &out, 0, 0)
instrumentnode(&l.N, &out, 0, 0)
out = list(out, l.N)
// Scan past OAS nodes copying results off stack.
// Those must not be instrumented, because the
@ -156,7 +163,7 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
out = list(out, l.N)
}
default:
racewalknode(&l.N, &out, 0, 0)
instrumentnode(&l.N, &out, 0, 0)
out = list(out, l.N)
}
}
@ -164,22 +171,22 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
goto ret
case ODEFER:
racewalknode(&n.Left, init, 0, 0)
instrumentnode(&n.Left, init, 0, 0)
goto ret
case OPROC:
racewalknode(&n.Left, init, 0, 0)
instrumentnode(&n.Left, init, 0, 0)
goto ret
case OCALLINTER:
racewalknode(&n.Left, init, 0, 0)
instrumentnode(&n.Left, init, 0, 0)
goto ret
// Instrument dst argument of runtime.writebarrier* calls
// as we do not instrument runtime code.
// typedslicecopy is instrumented in runtime.
case OCALLFUNC:
racewalknode(&n.Left, init, 0, 0)
instrumentnode(&n.Left, init, 0, 0)
goto ret
case ONOT,
@ -189,32 +196,32 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
OIMAG,
OCOM,
OSQRT:
racewalknode(&n.Left, init, wr, 0)
instrumentnode(&n.Left, init, wr, 0)
goto ret
case ODOTINTER:
racewalknode(&n.Left, init, 0, 0)
instrumentnode(&n.Left, init, 0, 0)
goto ret
case ODOT:
racewalknode(&n.Left, init, 0, 1)
instrumentnode(&n.Left, init, 0, 1)
callinstr(&n, init, wr, skip)
goto ret
case ODOTPTR: // dst = (*x).f with implicit *; otherwise it's ODOT+OIND
racewalknode(&n.Left, init, 0, 0)
instrumentnode(&n.Left, init, 0, 0)
callinstr(&n, init, wr, skip)
goto ret
case OIND: // *p
racewalknode(&n.Left, init, 0, 0)
instrumentnode(&n.Left, init, 0, 0)
callinstr(&n, init, wr, skip)
goto ret
case OSPTR, OLEN, OCAP:
racewalknode(&n.Left, init, 0, 0)
instrumentnode(&n.Left, init, 0, 0)
if Istype(n.Left.Type, TMAP) {
n1 := Nod(OCONVNOP, n.Left, nil)
n1.Type = Ptrto(Types[TUINT8])
@ -243,18 +250,18 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
OGT,
OADD,
OCOMPLEX:
racewalknode(&n.Left, init, wr, 0)
racewalknode(&n.Right, init, wr, 0)
instrumentnode(&n.Left, init, wr, 0)
instrumentnode(&n.Right, init, wr, 0)
goto ret
case OANDAND, OOROR:
racewalknode(&n.Left, init, wr, 0)
instrumentnode(&n.Left, init, wr, 0)
// walk has ensured the node has moved to a location where
// side effects are safe.
// n->right may not be executed,
// so instrumentation goes to n->right->ninit, not init.
racewalknode(&n.Right, &n.Right.Ninit, wr, 0)
instrumentnode(&n.Right, &n.Right.Ninit, wr, 0)
goto ret
@ -263,57 +270,57 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
goto ret
case OCONV:
racewalknode(&n.Left, init, wr, 0)
instrumentnode(&n.Left, init, wr, 0)
goto ret
case OCONVNOP:
racewalknode(&n.Left, init, wr, 0)
instrumentnode(&n.Left, init, wr, 0)
goto ret
case ODIV, OMOD:
racewalknode(&n.Left, init, wr, 0)
racewalknode(&n.Right, init, wr, 0)
instrumentnode(&n.Left, init, wr, 0)
instrumentnode(&n.Right, init, wr, 0)
goto ret
case OINDEX:
if !Isfixedarray(n.Left.Type) {
racewalknode(&n.Left, init, 0, 0)
instrumentnode(&n.Left, init, 0, 0)
} else if !islvalue(n.Left) {
// index of unaddressable array, like Map[k][i].
racewalknode(&n.Left, init, wr, 0)
instrumentnode(&n.Left, init, wr, 0)
racewalknode(&n.Right, init, 0, 0)
instrumentnode(&n.Right, init, 0, 0)
goto ret
}
racewalknode(&n.Right, init, 0, 0)
instrumentnode(&n.Right, init, 0, 0)
if n.Left.Type.Etype != TSTRING {
callinstr(&n, init, wr, skip)
}
goto ret
case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
racewalknode(&n.Left, init, 0, 0)
racewalknode(&n.Right, init, 0, 0)
instrumentnode(&n.Left, init, 0, 0)
instrumentnode(&n.Right, init, 0, 0)
goto ret
case OKEY:
racewalknode(&n.Left, init, 0, 0)
racewalknode(&n.Right, init, 0, 0)
instrumentnode(&n.Left, init, 0, 0)
instrumentnode(&n.Right, init, 0, 0)
goto ret
case OADDR:
racewalknode(&n.Left, init, 0, 1)
instrumentnode(&n.Left, init, 0, 1)
goto ret
// n->left is Type* which is not interesting.
case OEFACE:
racewalknode(&n.Right, init, 0, 0)
instrumentnode(&n.Right, init, 0, 0)
goto ret
case OITAB:
racewalknode(&n.Left, init, 0, 0)
instrumentnode(&n.Left, init, 0, 0)
goto ret
// should not appear in AST by now
@ -357,31 +364,31 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
OAS2RECV,
OAS2MAPR,
OASOP:
Yyerror("racewalk: %v must be lowered by now", Oconv(int(n.Op), 0))
Yyerror("instrument: %v must be lowered by now", Oconv(int(n.Op), 0))
goto ret
// impossible nodes: only appear in backend.
case ORROTC, OEXTEND:
Yyerror("racewalk: %v cannot exist now", Oconv(int(n.Op), 0))
Yyerror("instrument: %v cannot exist now", Oconv(int(n.Op), 0))
goto ret
case OGETG:
Yyerror("racewalk: OGETG can happen only in runtime which we don't instrument")
Yyerror("instrument: OGETG can happen only in runtime which we don't instrument")
goto ret
case OFOR:
if n.Left != nil {
racewalknode(&n.Left, &n.Left.Ninit, 0, 0)
instrumentnode(&n.Left, &n.Left.Ninit, 0, 0)
}
if n.Right != nil {
racewalknode(&n.Right, &n.Right.Ninit, 0, 0)
instrumentnode(&n.Right, &n.Right.Ninit, 0, 0)
}
goto ret
case OIF, OSWITCH:
if n.Left != nil {
racewalknode(&n.Left, &n.Left.Ninit, 0, 0)
instrumentnode(&n.Left, &n.Left.Ninit, 0, 0)
}
goto ret
@ -418,16 +425,17 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
ret:
if n.Op != OBLOCK { // OBLOCK is handled above in a special way.
racewalklist(n.List, init)
instrumentlist(n.List, init)
}
racewalklist(n.Nbody, nil)
racewalklist(n.Rlist, nil)
instrumentlist(n.Nbody, nil)
instrumentlist(n.Rlist, nil)
*np = n
}
func isartificial(n *Node) bool {
// compiler-emitted artificial things that we do not want to instrument,
// cant' possibly participate in a data race.
// can't possibly participate in a data race.
// can't be seen by C/C++ and therefore irrelevant for msan.
if n.Op == ONAME && n.Sym != nil && n.Sym.Name != "" {
if n.Sym.Name == "_" {
return true
@ -489,13 +497,31 @@ func callinstr(np **Node, init **NodeList, wr int, skip int) bool {
n = treecopy(n, 0)
makeaddable(n)
var f *Node
if t.Etype == TSTRUCT || Isfixedarray(t) {
if flag_msan != 0 {
name := "msanread"
if wr != 0 {
name = "msanwrite"
}
// dowidth may not have been called for PEXTERN.
dowidth(t)
w := t.Width
if w == BADWIDTH {
Fatalf("instrument: %v badwidth", t)
}
f = mkcall(name, nil, init, uintptraddr(n), Nodintconst(w))
} else if flag_race != 0 && (t.Etype == TSTRUCT || Isfixedarray(t)) {
name := "racereadrange"
if wr != 0 {
name = "racewriterange"
}
f = mkcall(name, nil, init, uintptraddr(n), Nodintconst(t.Width))
} else {
// dowidth may not have been called for PEXTERN.
dowidth(t)
w := t.Width
if w == BADWIDTH {
Fatalf("instrument: %v badwidth", t)
}
f = mkcall(name, nil, init, uintptraddr(n), Nodintconst(w))
} else if flag_race != 0 {
name := "raceread"
if wr != 0 {
name = "racewrite"


@ -6,9 +6,7 @@ package gc
import "cmd/internal/obj"
/*
* range
*/
// range
func typecheckrange(n *Node) {
var toomany int
var why string
@ -340,7 +338,7 @@ func walkrange(n *Node) {
//
// Parameters are as in walkrange: "for v1, v2 = range a".
func memclrrange(n, v1, v2, a *Node) bool {
if Debug['N'] != 0 || flag_race != 0 {
if Debug['N'] != 0 || instrumenting {
return false
}
if v1 == nil || v2 != nil {
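For context, the optimization being disabled under instrumentation is the one that recognizes the idiomatic clearing loop and replaces it with a single memclr; keeping the explicit stores makes each write visible to the race detector. The recognized shape, in ordinary Go:

package main

import "fmt"

func main() {
	a := make([]byte, 8)
	a[3] = 9
	for i := range a {
		a[i] = 0 // loop shape that memclrrange turns into one memclr call
	}
	fmt.Println(a)
}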


@ -12,9 +12,7 @@ import (
"sort"
)
/*
* runtime interface and reflection data structures
*/
// runtime interface and reflection data structures
var signatlist *NodeList
// byMethodNameAndPackagePath sorts method signatures by name, then package path.
@ -237,10 +235,8 @@ func hiter(t *Type) *Type {
return i
}
/*
* f is method type, with receiver.
* return function type, receiver as first argument (or not).
*/
// f is method type, with receiver.
// return function type, receiver as first argument (or not).
func methodfunc(f *Type, receiver *Type) *Type {
var in *NodeList
if receiver != nil {
@ -477,10 +473,8 @@ func dgopkgpath(s *Sym, ot int, pkg *Pkg) int {
return dsymptr(s, ot, pkg.Pathsym, 0)
}
/*
* uncommonType
* ../../runtime/type.go:/uncommonType
*/
// uncommonType
// ../../runtime/type.go:/uncommonType
func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
m := methods(t)
if t.Sym == nil && len(m) == 0 {
@ -686,10 +680,8 @@ func typeptrdata(t *Type) int64 {
}
}
/*
* commonType
* ../../runtime/type.go:/commonType
*/
// commonType
// ../../runtime/type.go:/commonType
var dcommontype_algarray *Sym
@ -997,7 +989,7 @@ func dtypesym(t *Type) *Sym {
dupok = obj.DUPOK
}
if compiling_runtime != 0 && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
if localpkg.Name == "runtime" && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
goto ok
}
@ -1040,7 +1032,7 @@ ok:
ot = dsymptr(s, ot, s1, 0)
}
// ../../runtime/type.go:/ChanType
// ../../runtime/type.go:/ChanType
case TCHAN:
s1 := dtypesym(t.Type)
@ -1114,7 +1106,7 @@ ok:
ot = dsymptr(s, ot, dtypesym(a.type_), 0)
}
// ../../../runtime/type.go:/MapType
// ../../../runtime/type.go:/MapType
case TMAP:
s1 := dtypesym(t.Down)
@ -1162,7 +1154,7 @@ ok:
xt = ot - 2*Widthptr
ot = dsymptr(s, ot, s1, 0)
// ../../runtime/type.go:/StructType
// ../../runtime/type.go:/StructType
// for security, only the exported fields.
case TSTRUCT:
n := 0
@ -1188,7 +1180,8 @@ ok:
}
} else {
ot = dgostringptr(s, ot, "")
if t1.Type.Sym != nil && t1.Type.Sym.Pkg == builtinpkg {
if t1.Type.Sym != nil &&
(t1.Type.Sym.Pkg == builtinpkg || !exportname(t1.Type.Sym.Name)) {
ot = dgopkgpath(s, ot, localpkg)
} else {
ot = dgostringptr(s, ot, "")
@ -1273,8 +1266,8 @@ func dumptypestructs() {
// so this is as good as any.
// another possible choice would be package main,
// but using runtime means fewer copies in .6 files.
if compiling_runtime != 0 {
for i := 1; i <= TBOOL; i++ {
if localpkg.Name == "runtime" {
for i := EType(1); i <= TBOOL; i++ {
dtypesym(Ptrto(Types[i]))
}
dtypesym(Ptrto(Types[TSTRING]))
@ -1292,6 +1285,9 @@ func dumptypestructs() {
if flag_race != 0 {
dimportpath(racepkg)
}
if flag_msan != 0 {
dimportpath(msanpkg)
}
dimportpath(mkpkg("main"))
}
}


@ -48,7 +48,7 @@ type Var struct {
width int
id int // index in vars
name int8
etype int8
etype EType
addr int8
}
@ -218,10 +218,8 @@ func walkvardef(n *Node, f *Flow, active int) {
}
}
/*
* add mov b,rn
* just after r
*/
// add mov b,rn
// just after r
func addmove(r *Flow, bn int, rn int, f int) {
p1 := Ctxt.NewProg()
Clearp(p1)
@ -251,7 +249,7 @@ func addmove(r *Flow, bn int, rn int, f int) {
p1.As = int16(Thearch.Optoas(OAS, Types[uint8(v.etype)]))
// TODO(rsc): Remove special case here.
if (Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL {
if (Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL {
p1.As = int16(Thearch.Optoas(OAS, Types[TUINT8]))
}
p1.From.Type = obj.TYPE_REG
@ -282,9 +280,7 @@ func overlap_reg(o1 int64, w1 int, o2 int64, w2 int) bool {
}
func mkvar(f *Flow, a *obj.Addr) Bits {
/*
* mark registers used
*/
// mark registers used
if a.Type == obj.TYPE_NONE {
return zbits
}
@ -306,7 +302,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
// TODO(rsc): Remove special case here.
case obj.TYPE_ADDR:
var bit Bits
if Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
goto memcase
}
a.Type = obj.TYPE_MEM
@ -356,7 +352,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
if node.Sym == nil || node.Sym.Name[0] == '.' {
return zbits
}
et := int(a.Etype)
et := EType(a.Etype)
o := a.Offset
w := a.Width
if w < 0 {
@ -369,7 +365,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
v = &vars[i]
if v.node == node && int(v.name) == n {
if v.offset == o {
if int(v.etype) == et {
if v.etype == et {
if int64(v.width) == w {
// TODO(rsc): Remove special case for arm here.
if flag == 0 || Thearch.Thechar != '5' {
@ -423,7 +419,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
v.id = i
v.offset = o
v.name = int8(n)
v.etype = int8(et)
v.etype = et
v.width = int(w)
v.addr = int8(flag) // funny punning
v.node = node
@ -460,7 +456,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
}
// Treat values with their address taken as live at calls,
// because the garbage collector's liveness analysis in ../gc/plive.c does.
// because the garbage collector's liveness analysis in plive.go does.
// These must be consistent or else we will elide stores and the garbage
// collector will see uninitialized data.
// The typical case where our own analysis is out of sync is when the
@ -473,7 +469,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
// sets addrtaken, even though it ends up not being actually shared.
// If we were better about _ elision, _ = &x would suffice too.
// The broader := in a closure problem is mentioned in a comment in
// closure.c:/^typecheckclosure and dcl.c:/^oldname.
// closure.go:/^typecheckclosure and dcl.go:/^oldname.
if node.Addrtaken {
v.addr = 1
}
@ -491,7 +487,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
}
if Debug['R'] != 0 {
fmt.Printf("bit=%2d et=%v w=%d+%d %v %v flag=%d\n", i, Econv(int(et), 0), o, w, Nconv(node, obj.FmtSharp), Ctxt.Dconv(a), v.addr)
fmt.Printf("bit=%2d et=%v w=%d+%d %v %v flag=%d\n", i, Econv(et), o, w, Nconv(node, obj.FmtSharp), Ctxt.Dconv(a), v.addr)
}
Ostats.Nvar++
@ -655,7 +651,7 @@ func allreg(b uint64, r *Rgn) uint64 {
r.regno = 0
switch v.etype {
default:
Fatalf("unknown etype %d/%v", Bitno(b), Econv(int(v.etype), 0))
Fatalf("unknown etype %d/%v", Bitno(b), Econv(v.etype))
case TINT8,
TUINT8,
@ -1036,11 +1032,9 @@ func Dumpit(str string, r0 *Flow, isreg int) {
func regopt(firstp *obj.Prog) {
mergetemp(firstp)
/*
* control flow is more complicated in generated go code
* than in generated c code. define pseudo-variables for
* registers, so we have complete register usage information.
*/
// control flow is more complicated in generated go code
// than in generated c code. define pseudo-variables for
// registers, so we have complete register usage information.
var nreg int
regnames := Thearch.Regnames(&nreg)
@ -1063,12 +1057,10 @@ func regopt(firstp *obj.Prog) {
ivar = zbits
ovar = zbits
/*
* pass 1
* build aux data structure
* allocate pcs
* find use and set of variables
*/
// pass 1
// build aux data structure
// allocate pcs
// find use and set of variables
g := Flowstart(firstp, func() interface{} { return new(Reg) })
if g == nil {
for i := 0; i < nvar; i++ {
@ -1151,7 +1143,7 @@ func regopt(firstp *obj.Prog) {
}
if Debug['R'] != 0 && Debug['v'] != 0 {
fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, Econv(int(v.etype), 0), v.width, v.node, v.offset)
fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, Econv(v.etype), v.width, v.node, v.offset)
}
}
@ -1159,23 +1151,19 @@ func regopt(firstp *obj.Prog) {
Dumpit("pass1", firstf, 1)
}
/*
* pass 2
* find looping structure
*/
// pass 2
// find looping structure
flowrpo(g)
if Debug['R'] != 0 && Debug['v'] != 0 {
Dumpit("pass2", firstf, 1)
}
/*
* pass 2.5
* iterate propagating fat vardef covering forward
* r->act records vars with a VARDEF since the last CALL.
* (r->act will be reused in pass 5 for something else,
* but we'll be done with it by then.)
*/
// pass 2.5
// iterate propagating fat vardef covering forward
// r->act records vars with a VARDEF since the last CALL.
// (r->act will be reused in pass 5 for something else,
// but we'll be done with it by then.)
active := 0
for f := firstf; f != nil; f = f.Link {
@ -1192,11 +1180,9 @@ func regopt(firstp *obj.Prog) {
}
}
/*
* pass 3
* iterate propagating usage
* back until flow graph is complete
*/
// pass 3
// iterate propagating usage
// back until flow graph is complete
var f1 *Flow
var i int
var f *Flow
@ -1212,7 +1198,7 @@ loop1:
}
}
/* pick up unreachable code */
// pick up unreachable code
loop11:
i = 0
@ -1235,11 +1221,9 @@ loop11:
Dumpit("pass3", firstf, 1)
}
/*
* pass 4
* iterate propagating register/variable synchrony
* forward until graph is complete
*/
// pass 4
// iterate propagating register/variable synchrony
// forward until graph is complete
loop2:
change = 0
@ -1255,10 +1239,8 @@ loop2:
Dumpit("pass4", firstf, 1)
}
/*
* pass 4.5
* move register pseudo-variables into regu.
*/
// pass 4.5
// move register pseudo-variables into regu.
mask := uint64((1 << uint(nreg)) - 1)
for f := firstf; f != nil; f = f.Link {
r := f.Data.(*Reg)
@ -1278,11 +1260,9 @@ loop2:
Dumpit("pass4.5", firstf, 1)
}
/*
* pass 5
* isolate regions
* calculate costs (paint1)
*/
// pass 5
// isolate regions
// calculate costs (paint1)
var bit Bits
if f := firstf; f != nil {
r := f.Data.(*Reg)
@ -1358,11 +1338,9 @@ loop2:
Dumpit("pass5", firstf, 1)
}
/*
* pass 6
* determine used registers (paint2)
* replace code (paint3)
*/
// pass 6
// determine used registers (paint2)
// replace code (paint3)
if Debug['R'] != 0 && Debug['v'] != 0 {
fmt.Printf("\nregisterizing\n")
}
@ -1379,16 +1357,14 @@ loop2:
if rgp.regno != 0 {
if Debug['R'] != 0 && Debug['v'] != 0 {
v := &vars[rgp.varno]
fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", v.node, v.offset, rgp.varno, Econv(int(v.etype), 0), obj.Rconv(int(rgp.regno)), usedreg, vreg)
fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", v.node, v.offset, rgp.varno, Econv(v.etype), obj.Rconv(int(rgp.regno)), usedreg, vreg)
}
paint3(rgp.enter, int(rgp.varno), vreg, int(rgp.regno))
}
}
/*
* free aux structures. peep allocates new ones.
*/
// free aux structures. peep allocates new ones.
for i := 0; i < nvar; i++ {
vars[i].node.SetOpt(nil)
}
@ -1404,17 +1380,13 @@ loop2:
firstf = nil
}
/*
* pass 7
* peep-hole on basic block
*/
// pass 7
// peep-hole on basic block
if Debug['R'] == 0 || Debug['P'] != 0 {
Thearch.Peep(firstp)
}
/*
* eliminate nops
*/
// eliminate nops
for p := firstp; p != nil; p = p.Link {
for p.Link != nil && p.Link.As == obj.ANOP {
p.Link = p.Link.Link


@ -4,9 +4,7 @@
package gc
/*
* select
*/
// select
func typecheckselect(sel *Node) {
var ncase *Node
var n *Node
@ -109,7 +107,7 @@ func walkselect(sel *Node) {
}
// optimization: one-case select: single op.
// TODO(rsc): Reenable optimization once order.c can handle it.
// TODO(rsc): Reenable optimization once order.go can handle it.
// golang.org/issue/7672.
if i == 1 {
cas := sel.List.N
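The one-case special case referred to here lowers a select with a single communication clause to that operation followed by the clause body. A small example of the two equivalent spellings (buffered channel so the example runs without blocking):

package main

import "fmt"

func main() {
	ch := make(chan int, 1)

	ch <- 1
	// A one-case select...
	select {
	case v := <-ch:
		fmt.Println("select:", v)
	}

	ch <- 2
	// ...behaves like the single operation it wraps, which is what
	// walkselect lowers it to.
	v := <-ch
	fmt.Println("direct:", v)
}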


@ -9,9 +9,7 @@ import (
"fmt"
)
/*
* static initialization
*/
// static initialization
const (
InitNotStarted = 0
InitDone = 1
@ -248,10 +246,8 @@ func initfix(l *NodeList) *NodeList {
return lout
}
/*
* compilation of top-level (static) assignments
* into DATA statements if at all possible.
*/
// compilation of top-level (static) assignments
// into DATA statements if at all possible.
func staticinit(n *Node, out **NodeList) bool {
if n.Op != ONAME || n.Class != PEXTERN || n.Name.Defn == nil || n.Name.Defn.Op != OAS {
Fatalf("staticinit")
@ -489,13 +485,11 @@ func staticassign(l *Node, r *Node, out **NodeList) bool {
return false
}
/*
* from here down is the walk analysis
* of composite literals.
* most of the work is to generate
* data statements for the constant
* part of the composite literal.
*/
// from here down is the walk analysis
// of composite literals.
// most of the work is to generate
// data statements for the constant
// part of the composite literal.
func staticname(t *Type, ctxt int) *Node {
n := newname(Lookupf("statictmp_%.4d", statuniqgen))
statuniqgen++
@ -765,7 +759,7 @@ func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
// set auto to point at new temp or heap (3 assign)
var a *Node
if x := prealloc[n]; x != nil {
// temp allocated during order.c for dddarg
// temp allocated during order.go for dddarg
x.Type = t
if vstat == nil {


@ -134,9 +134,6 @@ func buildssa(fn *Node) (ssafn *ssa.Func, usessa bool) {
s.Unimplementedf("local variable with class %s%s unimplemented", classnames[n.Class&^PHEAP], str)
}
}
// nodfp is a special argument which is the function's FP.
aux := &ssa.ArgSymbol{Typ: Types[TUINTPTR], Node: nodfp}
s.decladdrs[nodfp] = s.entryNewValue1A(ssa.OpAddr, Types[TUINTPTR], aux, s.sp)
// Convert the AST-based IR to the SSA-based IR
s.stmtList(fn.Func.Enter)
@ -847,8 +844,8 @@ func (s *state) stmt(n *Node) {
}
type opAndType struct {
op uint8
etype uint8
op Op
etype EType
}
var opToSSA = map[opAndType]ssa.Op{
@ -1061,7 +1058,7 @@ var opToSSA = map[opAndType]ssa.Op{
opAndType{OSQRT, TFLOAT64}: ssa.OpSqrt,
}
func (s *state) concreteEtype(t *Type) uint8 {
func (s *state) concreteEtype(t *Type) EType {
e := t.Etype
switch e {
default:
@ -1084,11 +1081,11 @@ func (s *state) concreteEtype(t *Type) uint8 {
}
}
func (s *state) ssaOp(op uint8, t *Type) ssa.Op {
func (s *state) ssaOp(op Op, t *Type) ssa.Op {
etype := s.concreteEtype(t)
x, ok := opToSSA[opAndType{op, etype}]
if !ok {
s.Unimplementedf("unhandled binary op %s %s", opnames[op], Econv(int(etype), 0))
s.Unimplementedf("unhandled binary op %s %s", opnames[op], Econv(etype))
}
return x
}
@ -1102,20 +1099,20 @@ func floatForComplex(t *Type) *Type {
}
type opAndTwoTypes struct {
op uint8
etype1 uint8
etype2 uint8
op Op
etype1 EType
etype2 EType
}
type twoTypes struct {
etype1 uint8
etype2 uint8
etype1 EType
etype2 EType
}
type twoOpsAndType struct {
op1 ssa.Op
op2 ssa.Op
intermediateType uint8
intermediateType EType
}
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
@ -1241,21 +1238,21 @@ var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
}
func (s *state) ssaShiftOp(op uint8, t *Type, u *Type) ssa.Op {
func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op {
etype1 := s.concreteEtype(t)
etype2 := s.concreteEtype(u)
x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
if !ok {
s.Unimplementedf("unhandled shift op %s etype=%s/%s", opnames[op], Econv(int(etype1), 0), Econv(int(etype2), 0))
s.Unimplementedf("unhandled shift op %s etype=%s/%s", opnames[op], Econv(etype1), Econv(etype2))
}
return x
}
func (s *state) ssaRotateOp(op uint8, t *Type) ssa.Op {
func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op {
etype1 := s.concreteEtype(t)
x, ok := opToSSA[opAndType{op, etype1}]
if !ok {
s.Unimplementedf("unhandled rotate op %s etype=%s", opnames[op], Econv(int(etype1), 0))
s.Unimplementedf("unhandled rotate op %s etype=%s", opnames[op], Econv(etype1))
}
return x
}
@ -1402,7 +1399,7 @@ func (s *state) expr(n *Node) *ssa.Value {
return nil
}
if etypesign(from.Etype) != etypesign(to.Etype) {
s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, Econv(int(from.Etype), 0), to, Econv(int(to.Etype), 0))
s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, Econv(from.Etype), to, Econv(to.Etype))
return nil
}
@ -1547,7 +1544,7 @@ func (s *state) expr(n *Node) *ssa.Value {
s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
}
s.Unimplementedf("unhandled OCONV %s -> %s", Econv(int(n.Left.Type.Etype), 0), Econv(int(n.Type.Etype), 0))
s.Unimplementedf("unhandled OCONV %s -> %s", Econv(n.Left.Type.Etype), Econv(n.Type.Etype))
return nil
case ODOTTYPE:
@ -1990,7 +1987,7 @@ func (s *state) expr(n *Node) *ssa.Value {
}
if haspointers(et) {
// TODO: just one write barrier call for all of these writes?
// TODO: maybe just one writeBarrierEnabled check?
// TODO: maybe just one writeBarrier.enabled check?
s.insertWB(et, addr, n.Lineno)
}
}
@ -2263,7 +2260,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
// etypesign returns the signed-ness of e, for integer/pointer etypes.
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
func etypesign(e uint8) int8 {
func etypesign(e EType) int8 {
switch e {
case TINT8, TINT16, TINT32, TINT64, TINT:
return -1
@ -2313,13 +2310,17 @@ func (s *state) addr(n *Node, bounded bool) *ssa.Value {
case PPARAM:
// parameter slot
v := s.decladdrs[n]
if v == nil {
if flag_race != 0 && n.String() == ".fp" {
s.Unimplementedf("race detector mishandles nodfp")
}
s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
if v != nil {
return v
}
return v
if n.String() == ".fp" {
// Special arg that points to the frame pointer.
// (Used by the race detector, others?)
aux := s.lookupSymbol(n, &ssa.ArgSymbol{Typ: n.Type, Node: n})
return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp)
}
s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
return nil
case PAUTO:
// We need to regenerate the address of autos
// at every use. This prevents LEA instructions
@ -2609,13 +2610,14 @@ func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Val
// Note: there must be no GC suspension points between the write and
// the call that this function inserts.
func (s *state) insertWB(t *Type, p *ssa.Value, line int32) {
// if writeBarrierEnabled {
// if writeBarrier.enabled {
// typedmemmove_nostore(&t, p)
// }
bThen := s.f.NewBlock(ssa.BlockPlain)
aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrierEnabled", 0).Sym}
aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier", 0).Sym}
flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TBOOL]), aux, s.sb)
// TODO: select the .enabled field. It is currently first, so not needed for now.
flag := s.newValue2(ssa.OpLoad, Types[TBOOL], flagaddr, s.mem())
b := s.endBlock()
b.Kind = ssa.BlockIf
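The commented pseudo-code above is the guard this function materializes in SSA form. As a rough user-level sketch of the same check-then-call pattern, with writeBarrierEnabled and typedmemmoveNostore as invented stand-ins for the runtime's writeBarrier.enabled flag and its helper (the real barrier also receives the type, omitted here):

package main

import (
	"fmt"
	"unsafe"
)

// Invented stand-ins for runtime internals.
var writeBarrierEnabled bool

func typedmemmoveNostore(dst unsafe.Pointer) { fmt.Printf("barrier for %p\n", dst) }

var p *int

func storePointer(v *int) {
	// Shape of the guard insertWB emits around a pointer store:
	// test the flag, call the barrier helper, then perform the store.
	if writeBarrierEnabled {
		typedmemmoveNostore(unsafe.Pointer(&p))
	}
	p = v
}

func main() {
	x := 1
	storePointer(&x)
	writeBarrierEnabled = true
	storePointer(&x)
	fmt.Println(*p)
}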


@ -34,7 +34,7 @@ func errorexit() {
}
func parserline() int {
if parsing && theparser.Lookahead() > 0 {
if oldparser != 0 && parsing && theparser.Lookahead() > 0 {
// parser has one symbol lookahead
return int(prevlineno)
}
@ -347,9 +347,9 @@ func importdot(opkg *Pkg, pack *Node) {
}
}
func Nod(op int, nleft *Node, nright *Node) *Node {
func Nod(op Op, nleft *Node, nright *Node) *Node {
n := new(Node)
n.Op = uint8(op)
n.Op = op
n.Left = nleft
n.Right = nright
n.Lineno = int32(parserline())
@ -382,7 +382,7 @@ func saveorignode(n *Node) {
if n.Orig != nil {
return
}
norig := Nod(int(n.Op), nil, nil)
norig := Nod(n.Op, nil, nil)
*norig = *n
n.Orig = norig
}
@ -546,11 +546,11 @@ func maptype(key *Type, val *Type) *Type {
if key != nil {
var bad *Type
atype := algtype1(key, &bad)
var mtype int
var mtype EType
if bad == nil {
mtype = int(key.Etype)
mtype = key.Etype
} else {
mtype = int(bad.Etype)
mtype = bad.Etype
}
switch mtype {
default:
@ -581,9 +581,9 @@ func maptype(key *Type, val *Type) *Type {
return t
}
func typ(et int) *Type {
func typ(et EType) *Type {
t := new(Type)
t.Etype = uint8(et)
t.Etype = et
t.Width = BADWIDTH
t.Lineno = int(lineno)
t.Orig = t
@ -777,7 +777,7 @@ func isnil(n *Node) bool {
return true
}
func isptrto(t *Type, et int) bool {
func isptrto(t *Type, et EType) bool {
if t == nil {
return false
}
@ -788,14 +788,14 @@ func isptrto(t *Type, et int) bool {
if t == nil {
return false
}
if int(t.Etype) != et {
if t.Etype != et {
return false
}
return true
}
func Istype(t *Type, et int) bool {
return t != nil && int(t.Etype) == et
func Istype(t *Type, et EType) bool {
return t != nil && t.Etype == et
}
func Isfixedarray(t *Type) bool {
@ -846,10 +846,8 @@ func isideal(t *Type) bool {
return false
}
/*
* given receiver of type t (t == r or t == *r)
* return type to hang methods off (r).
*/
// given receiver of type t (t == r or t == *r)
// return type to hang methods off (r).
func methtype(t *Type, mustname int) *Type {
if t == nil {
return nil
@ -890,7 +888,7 @@ func methtype(t *Type, mustname int) *Type {
return t
}
func cplxsubtype(et int) int {
func cplxsubtype(et EType) EType {
switch et {
case TCOMPLEX64:
return TFLOAT32
@ -899,7 +897,7 @@ func cplxsubtype(et int) int {
return TFLOAT64
}
Fatalf("cplxsubtype: %v\n", Econv(int(et), 0))
Fatalf("cplxsubtype: %v\n", Econv(et))
return 0
}
@ -1056,7 +1054,7 @@ func eqtypenoname(t1 *Type, t2 *Type) bool {
// Is type src assignment compatible to type dst?
// If so, return op code to use in conversion.
// If not, return 0.
func assignop(src *Type, dst *Type, why *string) int {
func assignop(src *Type, dst *Type, why *string) Op {
if why != nil {
*why = ""
}
@ -1180,7 +1178,7 @@ func assignop(src *Type, dst *Type, why *string) int {
// Can we convert a value of type src to a value of type dst?
// If so, return op code to use in conversion (maybe OCONVNOP).
// If not, return 0.
func convertop(src *Type, dst *Type, why *string) int {
func convertop(src *Type, dst *Type, why *string) Op {
if why != nil {
*why = ""
}
@ -1383,9 +1381,7 @@ func substAny(tp **Type, types *[]*Type) {
}
}
/*
* Is this a 64-bit type?
*/
// Is this a 64-bit type?
func Is64(t *Type) bool {
if t == nil {
return false
@ -1398,12 +1394,10 @@ func Is64(t *Type) bool {
return false
}
/*
* Is a conversion between t1 and t2 a no-op?
*/
// Is a conversion between t1 and t2 a no-op?
func Noconv(t1 *Type, t2 *Type) bool {
e1 := int(Simtype[t1.Etype])
e2 := int(Simtype[t2.Etype])
e1 := Simtype[t1.Etype]
e2 := Simtype[t2.Etype]
switch e1 {
case TINT8, TUINT8:
@ -1501,18 +1495,16 @@ func syslook(name string, copy int) *Node {
return n
}
/*
* compute a hash value for type t.
* if t is a method type, ignore the receiver
* so that the hash can be used in interface checks.
* %T already contains
* all the necessary logic to generate a representation
* of the type that completely describes it.
* using smprint here avoids duplicating that code.
* using md5 here is overkill, but i got tired of
* accidental collisions making the runtime think
* two types are equal when they really aren't.
*/
// compute a hash value for type t.
// if t is a method type, ignore the receiver
// so that the hash can be used in interface checks.
// %T already contains
// all the necessary logic to generate a representation
// of the type that completely describes it.
// using smprint here avoids duplicating that code.
// using md5 here is overkill, but i got tired of
// accidental collisions making the runtime think
// two types are equal when they really aren't.
func typehash(t *Type) uint32 {
var p string
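As the comment says, the hash is just MD5 over a string that fully describes the type, folded down to 32 bits. A standalone sketch of that idea; the exact format flags and byte order the compiler uses are not reproduced here:

package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
)

// typeHash32 folds an MD5 of a type's textual description into 32 bits.
func typeHash32(typeDesc string) uint32 {
	sum := md5.Sum([]byte(typeDesc))
	return binary.LittleEndian.Uint32(sum[:4])
}

func main() {
	fmt.Println(typeHash32("func(int, string) error"))
	fmt.Println(typeHash32("map[string][]byte"))
}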
@ -1613,12 +1605,10 @@ func printframenode(n *Node) {
}
}
/*
* calculate sethi/ullman number
* roughly how many registers needed to
* compile a node. used to compile the
* hardest side first to minimize registers.
*/
// calculate sethi/ullman number
// roughly how many registers needed to
// compile a node. used to compile the
// hardest side first to minimize registers.
func ullmancalc(n *Node) {
if n == nil {
return
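For reference, the number being calculated is the classic Sethi–Ullman register estimate: a leaf needs one register, and a binary node needs the larger of its children's numbers, or one more when they are equal, which is why the harder side is compiled first. A toy version on a hand-built expression tree (ullmancalc itself also special-cases calls and, with this change, instrumented && and ||):

package main

import "fmt"

type expr struct {
	op          string
	left, right *expr
}

// ullman returns the Sethi–Ullman register estimate for a binary
// expression tree: 1 for a leaf, and for an interior node the max of
// the children, plus one when both sides need the same amount.
func ullman(e *expr) int {
	if e.left == nil && e.right == nil {
		return 1
	}
	l, r := ullman(e.left), ullman(e.right)
	if l == r {
		return l + 1
	}
	if l > r {
		return l
	}
	return r
}

func main() {
	// (a+b) * (c+d) needs 3 registers; a + (b*c) needs only 2.
	ab := &expr{op: "+", left: &expr{op: "a"}, right: &expr{op: "b"}}
	cd := &expr{op: "+", left: &expr{op: "c"}, right: &expr{op: "d"}}
	fmt.Println(ullman(&expr{op: "*", left: ab, right: cd})) // 3
	fmt.Println(ullman(&expr{op: "+", left: &expr{op: "a"},
		right: &expr{op: "*", left: &expr{op: "b"}, right: &expr{op: "c"}}})) // 2
}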
@ -1643,9 +1633,9 @@ func ullmancalc(n *Node) {
ul = UINF
goto out
// hard with race detector
// hard with instrumented code
case OANDAND, OOROR:
if flag_race != 0 {
if instrumenting {
ul = UINF
goto out
}
@ -1673,7 +1663,7 @@ out:
n.Ullman = uint8(ul)
}
func badtype(o int, tl *Type, tr *Type) {
func badtype(op Op, tl *Type, tr *Type) {
fmt_ := ""
if tl != nil {
fmt_ += fmt.Sprintf("\n\t%v", tl)
@ -1692,12 +1682,10 @@ func badtype(o int, tl *Type, tr *Type) {
}
s := fmt_
Yyerror("illegal types for operand: %v%s", Oconv(int(o), 0), s)
Yyerror("illegal types for operand: %v%s", Oconv(int(op), 0), s)
}
/*
* iterator to walk a structure declaration
*/
// iterator to walk a structure declaration
func Structfirst(s *Iter, nn **Type) *Type {
var t *Type
@ -1749,9 +1737,7 @@ func structnext(s *Iter) *Type {
return t
}
/*
* iterator to this and inargs in a function
*/
// iterator to this and inargs in a function
func funcfirst(s *Iter, t *Type) *Type {
var fp *Type
@ -1823,8 +1809,8 @@ func getinargx(t *Type) *Type {
// Brcom returns !(op).
// For example, Brcom(==) is !=.
func Brcom(a int) int {
switch a {
func Brcom(op Op) Op {
switch op {
case OEQ:
return ONE
case ONE:
@ -1838,14 +1824,14 @@ func Brcom(a int) int {
case OGE:
return OLT
}
Fatalf("brcom: no com for %v\n", Oconv(a, 0))
return a
Fatalf("brcom: no com for %v\n", Oconv(int(op), 0))
return op
}
// Brrev returns reverse(op).
// For example, Brrev(<) is >.
func Brrev(a int) int {
switch a {
func Brrev(op Op) Op {
switch op {
case OEQ:
return OEQ
case ONE:
@ -1859,14 +1845,12 @@ func Brrev(a int) int {
case OGE:
return OLE
}
Fatalf("brrev: no rev for %v\n", Oconv(a, 0))
return a
Fatalf("brrev: no rev for %v\n", Oconv(int(op), 0))
return op
}
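To keep the two straight: Brcom negates the comparison (used when a branch is flipped), while Brrev swaps its operands (used when the two sides are exchanged). A small table-driven illustration over operator strings:

package main

import "fmt"

// brcom: the negated comparison, so !(a op b) == (a brcom[op] b).
var brcom = map[string]string{
	"==": "!=", "!=": "==",
	"<": ">=", ">": "<=",
	"<=": ">", ">=": "<",
}

// brrev: the reversed comparison, so (a op b) == (b brrev[op] a).
var brrev = map[string]string{
	"==": "==", "!=": "!=",
	"<": ">", ">": "<",
	"<=": ">=", ">=": "<=",
}

func main() {
	fmt.Println(brcom["<"]) // ">=" : !(a < b) == (a >= b)
	fmt.Println(brrev["<"]) // ">"  : (a < b) == (b > a)
}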
/*
* return side effect-free n, appending side effects to init.
* result is assignable if n is.
*/
// return side effect-free n, appending side effects to init.
// result is assignable if n is.
func safeexpr(n *Node, init **NodeList) *Node {
if n == nil {
return nil
@ -1935,10 +1919,8 @@ func copyexpr(n *Node, t *Type, init **NodeList) *Node {
return l
}
/*
* return side-effect free and cheap n, appending side effects to init.
* result may not be assignable.
*/
// return side-effect free and cheap n, appending side effects to init.
// result may not be assignable.
func cheapexpr(n *Node, init **NodeList) *Node {
switch n.Op {
case ONAME, OLITERAL:
@ -1963,14 +1945,10 @@ func Setmaxarg(t *Type, extra int32) {
}
}
/*
* unicode-aware case-insensitive strcmp
*/
// unicode-aware case-insensitive strcmp
/*
* code to resolve elided DOTs
* in embedded types
*/
// code to resolve elided DOTs
// in embedded types
// search depth 0 --
// return count of fields+methods
@ -2103,16 +2081,14 @@ func adddot(n *Node) *Node {
return n
}
/*
* code to help generate trampoline
* functions for methods on embedded
* subtypes.
* these are approx the same as
* the corresponding adddot routines
* except that they expect to be called
* with unique tasks and they return
* the actual methods.
*/
// code to help generate trampoline
// functions for methods on embedded
// subtypes.
// these are approx the same as
// the corresponding adddot routines
// except that they expect to be called
// with unique tasks and they return
// the actual methods.
type Symlink struct {
field *Type
link *Symlink
@ -2260,9 +2236,7 @@ func expandmeth(t *Type) {
}
}
/*
* Given funarg struct list, return list of ODCLFIELD Node fn args.
*/
// Given funarg struct list, return list of ODCLFIELD Node fn args.
func structargs(tl **Type, mustname int) *NodeList {
var savet Iter
var a *Node
@ -2293,29 +2267,27 @@ func structargs(tl **Type, mustname int) *NodeList {
return args
}
/*
* Generate a wrapper function to convert from
* a receiver of type T to a receiver of type U.
* That is,
*
* func (t T) M() {
* ...
* }
*
* already exists; this function generates
*
* func (u U) M() {
* u.M()
* }
*
* where the types T and U are such that u.M() is valid
* and calls the T.M method.
* The resulting function is for use in method tables.
*
* rcvr - U
* method - M func (t T)(), a TFIELD type struct
* newnam - the eventual mangled name of this function
*/
// Generate a wrapper function to convert from
// a receiver of type T to a receiver of type U.
// That is,
//
// func (t T) M() {
// ...
// }
//
// already exists; this function generates
//
// func (u U) M() {
// u.M()
// }
//
// where the types T and U are such that u.M() is valid
// and calls the T.M method.
// The resulting function is for use in method tables.
//
// rcvr - U
// method - M func (t T)(), a TFIELD type struct
// newnam - the eventual mangled name of this function
var genwrapper_linehistdone int = 0
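Written out by hand, the wrapper described above is nothing more than a receiver adjustment plus a forwarding call (or, when the conditions checked further down hold, a tail call). The types here are invented for illustration:

package main

import "fmt"

type T struct{ n int }

// The real method, defined on T.
func (t T) M() { fmt.Println("T.M", t.n) }

// U embeds T, so U gets M through a compiler-generated wrapper.
type U struct{ T }

// Hand-written equivalent of the wrapper genwrapper emits for U.M:
// adjust the receiver from U to the embedded T and forward the call.
func wrapperUM(u U) { u.T.M() }

func main() {
	u := U{T{n: 7}}
	u.M()        // goes through the generated wrapper
	wrapperUM(u) // same effect, spelled out
}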
@ -2405,7 +2377,7 @@ func genwrapper(rcvr *Type, method *Type, newnam *Sym, iface int) {
dot := adddot(Nod(OXDOT, this.Left, newname(method.Sym)))
// generate call
if flag_race == 0 && Isptr[rcvr.Etype] && Isptr[methodrcvr.Etype] && method.Embedded != 0 && !isifacemethod(method.Type) {
if !instrumenting && Isptr[rcvr.Etype] && Isptr[methodrcvr.Etype] && method.Embedded != 0 && !isifacemethod(method.Type) {
// generate tail call: adjust pointer receiver and jump to embedded method.
dot = dot.Left // skip final .M
if !Isptr[dotlist[0].field.Type.Etype] {
@ -2511,9 +2483,7 @@ func hashfor(t *Type) *Node {
return n
}
/*
* Generate a helper function to compute the hash of a value of type t.
*/
// Generate a helper function to compute the hash of a value of type t.
func genhash(sym *Sym, t *Type) {
if Debug['r'] != 0 {
fmt.Printf("genhash %v %v\n", sym, t)
@ -2748,9 +2718,7 @@ func eqmem(p *Node, q *Node, field *Node, size int64) *Node {
return nif
}
/*
* Generate a helper function to check equality of two values of type t.
*/
// Generate a helper function to check equality of two values of type t.
func geneq(sym *Sym, t *Type) {
if Debug['r'] != 0 {
fmt.Printf("geneq %v %v\n", sym, t)
@ -3020,17 +2988,15 @@ func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool
return true
}
/*
* even simpler simtype; get rid of ptr, bool.
* assuming that the front end has rejected
* all the invalid conversions (like ptr -> bool)
*/
func Simsimtype(t *Type) int {
// even simpler simtype; get rid of ptr, bool.
// assuming that the front end has rejected
// all the invalid conversions (like ptr -> bool)
func Simsimtype(t *Type) EType {
if t == nil {
return 0
}
et := int(Simtype[t.Etype])
et := Simtype[t.Etype]
switch et {
case TPTR32:
et = TUINT32
@ -3062,9 +3028,7 @@ func liststmt(l *NodeList) *Node {
return n
}
/*
* return nelem of list
*/
// return nelem of list
func structcount(t *Type) int {
var s Iter
@ -3075,11 +3039,9 @@ func structcount(t *Type) int {
return v
}
/*
* return power of 2 of the constant
* operand. -1 if it is not a power of 2.
* 1000+ if it is a -(power of 2)
*/
// return power of 2 of the constant
// operand. -1 if it is not a power of 2.
// 1000+ if it is a -(power of 2)
func powtwo(n *Node) int {
if n == nil || n.Op != OLITERAL || n.Type == nil {
return -1
@ -3113,12 +3075,10 @@ func powtwo(n *Node) int {
return -1
}
/*
* return the unsigned type for
* a signed integer type.
* returns T if input is not a
* signed integer type.
*/
// return the unsigned type for
// a signed integer type.
// returns T if input is not a
// signed integer type.
func tounsigned(t *Type) *Type {
// this is types[et+1], but not sure
// that this relation is immutable
@ -3146,10 +3106,8 @@ func tounsigned(t *Type) *Type {
return t
}
/*
* magic number for signed division
* see hacker's delight chapter 10
*/
// magic number for signed division
// see hacker's delight chapter 10
func Smagic(m *Magic) {
var mask uint64
@ -3243,10 +3201,8 @@ func Smagic(m *Magic) {
m.S = p - m.W
}
/*
* magic number for unsigned division
* see hacker's delight chapter 10
*/
// magic number for unsigned division
// see hacker's delight chapter 10
func Umagic(m *Magic) {
var mask uint64
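The trick Umagic encodes is replacing x/d by a multiply and a shift with m = ceil(2^s/d). The sketch below does not reproduce the closed-form derivation from Hacker's Delight; it simply searches for the smallest shift that is exact over a test range, which is enough to see the identity at work:

package main

import "fmt"

// findMagic returns (m, s) such that x/d == (x*m)>>s for all x < limit,
// with m = ceil(2^s / d). Brute-force check instead of the closed-form
// derivation in Umagic, but the identity being exploited is the same.
func findMagic(d, limit uint64) (m uint64, s uint) {
	for s = 1; ; s++ {
		m = ((1 << s) + d - 1) / d // ceil(2^s / d)
		ok := true
		for x := uint64(0); x < limit; x++ {
			if (x*m)>>s != x/d {
				ok = false
				break
			}
		}
		if ok {
			return m, s
		}
	}
}

func main() {
	m, s := findMagic(7, 1<<16)
	fmt.Printf("x/7 == (x*%d)>>%d for all x < 2^16\n", m, s)
}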
@ -3353,15 +3309,13 @@ func ngotype(n *Node) *Sym {
return nil
}
/*
* Convert raw string to the prefix that will be used in the symbol
* table. All control characters, space, '%' and '"', as well as
* non-7-bit clean bytes turn into %xx. The period needs escaping
* only in the last segment of the path, and it makes for happier
* users if we escape that as little as possible.
*
* If you edit this, edit ../../debug/goobj/read.go:/importPathToPrefix too.
*/
// Convert raw string to the prefix that will be used in the symbol
// table. All control characters, space, '%' and '"', as well as
// non-7-bit clean bytes turn into %xx. The period needs escaping
// only in the last segment of the path, and it makes for happier
// users if we escape that as little as possible.
//
// If you edit this, edit ../../debug/goobj/read.go:/importPathToPrefix too.
func pathtoprefix(s string) string {
slash := strings.LastIndex(s, "/")
for i := 0; i < len(s); i++ {
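A standalone sketch of the escaping rule described in the comment: control characters, space, '%', '"' and non-7-bit-clean bytes become %xx everywhere, and '.' is escaped only in the last path segment. This follows the description rather than the exact implementation:

package main

import (
	"fmt"
	"strings"
)

// escapePathPrefix applies the rule described for pathtoprefix:
// %-escape control characters, space, '%', '"', and non-7-bit bytes
// everywhere, and escape '.' only in the last path segment.
func escapePathPrefix(s string) string {
	slash := strings.LastIndex(s, "/")
	var b strings.Builder
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c <= ' ' || c == '%' || c == '"' || c >= 0x7f || (c == '.' && i > slash) {
			fmt.Fprintf(&b, "%%%02x", c)
		} else {
			b.WriteByte(c)
		}
	}
	return b.String()
}

func main() {
	fmt.Println(escapePathPrefix("go/ast.v2"))       // dot in the last segment is escaped
	fmt.Println(escapePathPrefix("example.com/x y")) // dot before the last slash kept, space escaped
}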
@ -3479,10 +3433,8 @@ func checknil(x *Node, init **NodeList) {
*init = list(*init, n)
}
/*
* Can this type be stored directly in an interface word?
* Yes, if the representation is a single pointer.
*/
// Can this type be stored directly in an interface word?
// Yes, if the representation is a single pointer.
func isdirectiface(t *Type) bool {
switch t.Etype {
case TPTR32,

Some files were not shown because too many files have changed in this diff.